From 7943f099d60f54129e46643057810c34e9dff9f5 Mon Sep 17 00:00:00 2001
From: novoselov
Date: Thu, 27 Jun 2024 18:50:48 +0500
Subject: [PATCH 01/37] fix scale with windows nodes in cluster issue 4908

Signed-off-by: novoselov
---
 pkg/scalers/selenium_grid_scaler.go | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go
index 4e1f75667ad..451fdf4fb53 100644
--- a/pkg/scalers/selenium_grid_scaler.go
+++ b/pkg/scalers/selenium_grid_scaler.go
@@ -63,6 +63,7 @@ type seleniumSession struct {
 	ID           string `json:"id"`
 	Capabilities string `json:"capabilities"`
 	NodeID       string `json:"nodeId"`
+	PlatformName string `json:"platformName"`
 }
 
 type capability struct {
@@ -152,7 +153,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri
 
 func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) {
 	body, err := json.Marshal(map[string]string{
-		"query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }",
+		"query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId, platformName } } }",
 	})
 
 	if err != nil {
@@ -198,7 +199,7 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 	for _, sessionQueueRequest := range sessionQueueRequests {
 		var capability = capability{}
 		if err := json.Unmarshal([]byte(sessionQueueRequest), &capability); err == nil {
-			if capability.BrowserName == browserName {
+			if capability.BrowserName == browserName && !strings.EqualFold(capability.PlatformName, "windows") {
 				var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName)
 				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches {
 					count++
@@ -215,8 +216,8 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 	for _, session := range sessions {
 		var capability = capability{}
 		if err := json.Unmarshal([]byte(session.Capabilities), &capability); err == nil {
-			var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName)
-			if capability.BrowserName == sessionBrowserName {
+			if capability.BrowserName == sessionBrowserName && !strings.EqualFold(capability.PlatformName, "windows") {
+				var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName)
 				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches {
 					count++
 				} else if browserVersion == DefaultBrowserVersion && platformNameMatches {

From ff0c35357b40492ddf3b8f2f537fa9ffe8928fe5 Mon Sep 17 00:00:00 2001
From: novoselov
Date: Tue, 2 Jul 2024 16:37:12 +0500
Subject: [PATCH 02/37] review fixes

Signed-off-by: novoselov
---
 pkg/scalers/selenium_grid_scaler.go | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go
index 451fdf4fb53..dd57bd58331 100644
--- a/pkg/scalers/selenium_grid_scaler.go
+++ b/pkg/scalers/selenium_grid_scaler.go
@@ -199,11 +199,10 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 	for _, sessionQueueRequest := range sessionQueueRequests {
 		var capability = capability{}
 		if err := json.Unmarshal([]byte(sessionQueueRequest), &capability); err == nil {
-			if capability.BrowserName == browserName && !strings.EqualFold(capability.PlatformName, "windows") {
-				var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName)
-				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches {
+			if capability.BrowserName == browserName {
+				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && strings.EqualFold(capability.PlatformName, platformName) {
 					count++
-				} else if len(strings.TrimSpace(capability.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && platformNameMatches {
+				} else if len(strings.TrimSpace(capability.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && strings.EqualFold(capability.PlatformName, platformName) {
 					count++
 				}
 			}
@@ -216,11 +215,10 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 	for _, session := range sessions {
 		var capability = capability{}
 		if err := json.Unmarshal([]byte(session.Capabilities), &capability); err == nil {
-			if capability.BrowserName == sessionBrowserName && !strings.EqualFold(capability.PlatformName, "windows") {
-				var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName)
-				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches {
+			if capability.BrowserName == sessionBrowserName {
+				if strings.HasPrefix(capability.BrowserVersion, browserVersion) && strings.EqualFold(capability.PlatformName, platformName) {
 					count++
-				} else if browserVersion == DefaultBrowserVersion && platformNameMatches {
+				} else if browserVersion == DefaultBrowserVersion && strings.EqualFold(capability.PlatformName, platformName) {
 					count++
 				}
 			}

From a6d9f7346e7bf52911480396939c575836ee06e1 Mon Sep 17 00:00:00 2001
From: Joel Smith
Date: Mon, 1 Jul 2024 01:09:21 -0600
Subject: [PATCH 03/37] Add additional manifest checks to detect missing CRDs & CRs (#5921)

Signed-off-by: novoselov
---
 .github/workflows/pr-validation.yml           |  6 ++-
 CHANGELOG.md                                  |  1 +
 ...v1alpha1_clustertriggerauthentication.yaml |  9 ++++
 .../keda_v1alpha1_triggerauthentication.yaml  |  2 +-
 config/samples/kustomization.yaml             |  2 +
 hack/verify-manifests.sh                      | 54 +++++++++++++++++++
 6 files changed, 72 insertions(+), 2 deletions(-)
 create mode 100644 config/samples/keda_v1alpha1_clustertriggerauthentication.yaml

diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml
index 09da2bf6f60..bcd34e3984b 100644
--- a/.github/workflows/pr-validation.yml
+++ b/.github/workflows/pr-validation.yml
@@ -19,12 +19,16 @@ jobs:
           - runner: ubuntu-latest
             name: amd64
     steps:
-
       - name: Check out code
        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
         with:
           fetch-depth: 1
 
+      - name: Install prerequisites
+        run: |
+          apt-get update
+          apt-get install -y jq python3-yaml
+
       - name: Register workspace path
         run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd14e97d968..c3c8ed1b4d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -79,6 +79,7 @@ Here is an overview of all new **experimental** features:
 
 ### Fixes
 
+- **General**: Check for missing CRD references and sample CRs ([#5920](https://github.com/kedacore/keda/issues/5920))
 - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
 - **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760))
 
diff --git a/config/samples/keda_v1alpha1_clustertriggerauthentication.yaml b/config/samples/keda_v1alpha1_clustertriggerauthentication.yaml
new file mode 100644
index 00000000000..b0ab1ec8f67
--- /dev/null
+++ b/config/samples/keda_v1alpha1_clustertriggerauthentication.yaml
@@ -0,0 +1,9 @@
+apiVersion: keda.sh/v1alpha1
+kind: ClusterTriggerAuthentication
+metadata:
+  name: example-clustertriggerauthentication
+spec:
+  secretTargetRef:
+    - parameter: example-secret-parameter
+      name: example-secret-name
+      key: example-role-key
diff --git a/config/samples/keda_v1alpha1_triggerauthentication.yaml b/config/samples/keda_v1alpha1_triggerauthentication.yaml
index a4733a1d4ac..0bc49056653 100644
--- a/config/samples/keda_v1alpha1_triggerauthentication.yaml
+++ b/config/samples/keda_v1alpha1_triggerauthentication.yaml
@@ -1,4 +1,4 @@
-apiVersion: keda.k8s.io/v1alpha1
+apiVersion: keda.sh/v1alpha1
 kind: TriggerAuthentication
 metadata:
   name: example-triggerauthentication
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index 9ef860d16a0..94ec29e2c3a 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -1,5 +1,7 @@
 ## Append samples you want in your CSV to this file as resources ##
 resources:
+- eventing_v1alpha1_cloudeventsource.yaml
+- keda_v1alpha1_clustertriggerauthentication.yaml
 - keda_v1alpha1_scaledobject.yaml
 - keda_v1alpha1_scaledjob.yaml
 - keda_v1alpha1_triggerauthentication.yaml
diff --git a/hack/verify-manifests.sh b/hack/verify-manifests.sh
index 74c8b8a2b05..31816755274 100755
--- a/hack/verify-manifests.sh
+++ b/hack/verify-manifests.sh
@@ -30,6 +30,55 @@ cleanup() {
 }
 trap "cleanup" EXIT SIGINT
 
+yaml2json() {
+    python3 -c 'import json, sys, yaml ; y=yaml.safe_load(sys.stdin.read()) ; json.dump(y, sys.stdout)'
+}
+
+if ! python3 -c "import yaml" >/dev/null 2>&1; then
+    echo "Python module 'yaml' required for this script."
+    exit 1
+fi
+
+# Make sure all the CRDs are listed in the kustomize resource list
+declare -A crds
+declare -A crs
+while read -r filename; do
+    crds["$filename"]=1
+done < <(sed -n '/^resources:$/,/^[^-]/ s#^- ##p' config/crd/kustomization.yaml)
+bad_crd_resource_list=0
+for f in config/crd/bases/*.yaml; do
+    key="bases/$(basename "$f")"
+    if [ ! -v "crds[${key}]" ]; then
+        echo "ERROR: CRD file $f is not listed in the resources section of config/crd/kustomization.yaml"
+        bad_crd_resource_list=1
+    else
+        crs[$key]="$(yaml2json < $f | jq -r '.spec.names.singular as $k | (.spec.group | sub("\\..*"; "")) as $g | .spec.versions[] | ($g+"_"+.name+"_"+$k)')"
+    fi
+done
+
+# Make sure all sample CRs are listed in the kustomize resource list (part 1)
+declare -A crslist
+while read -r filename; do
+    if ! test -f "$filename"; then
+        crslist["$filename"]=1
+    fi
+done < <(sed -n '/^resources:$/,/^[^-]/ s#^- ##p' config/samples/kustomization.yaml)
+
+# Make sure there is a sample CR for each CRD version
+for key in ${!crs[@]}; do
+    for gvk in ${crs[$key]}; do
+        if [ ! -f "config/samples/${gvk}.yaml" ]; then
+            echo "ERROR: CRD config/crd/$key does not have a sample CR config/samples/$gvk.yaml"
+            bad_crd_resource_list=1
+        fi
+        # Make sure all sample CRs are listed in the kustomize resource list (part 2)
+        if [ ! -v "crslist[${gvk}.yaml]" ]; then
+            echo "ERROR: CR config/samples/${gvk}.yaml is not listed in the resources section of config/samples/kustomization.yaml"
+            bad_crd_resource_list=1
+        fi
+    done
+done
+
 cleanup
 mkdir -p "${TMP_DIFFROOT}"
@@ -47,3 +96,8 @@ else
   echo "${DIFFROOT} is out of date. Please run 'make manifests'"
   exit 1
 fi
+
+if [ "$bad_crd_resource_list" != 0 ]; then
+  echo "Check failed due to previous errors. See output above"
+  exit 1
+fi

From c49ad9bb66a080cd8c38aecbac6cf07dadc82574 Mon Sep 17 00:00:00 2001
From: Caleb Lemoine <21261388+circa10a@users.noreply.github.com>
Date: Mon, 1 Jul 2024 11:50:51 -0700
Subject: [PATCH 04/37] Add Splunk scaler (#5905)

Signed-off-by: circa10a
Signed-off-by: Caleb Lemoine <21261388+circa10a@users.noreply.github.com>
Signed-off-by: novoselov
---
 CHANGELOG.md                        |   1 +
 pkg/scalers/splunk/splunk.go        | 120 +++++++++++
 pkg/scalers/splunk/splunk_test.go   | 303 ++++++++++++++++++++++++++
 pkg/scalers/splunk_scaler.go        | 118 +++++++++++
 pkg/scalers/splunk_scaler_test.go   |  95 +++++++++
 pkg/scaling/scalers_builder.go      |   2 +
 tests/scalers/splunk/splunk_test.go | 317 ++++++++++++++++++++++++++++
 7 files changed, 956 insertions(+)
 create mode 100644 pkg/scalers/splunk/splunk.go
 create mode 100644 pkg/scalers/splunk/splunk_test.go
 create mode 100644 pkg/scalers/splunk_scaler.go
 create mode 100644 pkg/scalers/splunk_scaler_test.go
 create mode 100644 tests/scalers/splunk/splunk_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c3c8ed1b4d7..3a1992488e8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -61,6 +61,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))
 - **General**: Add --ca-dir flag to KEDA operator to specify directories with CA certificates for scalers to authenticate TLS connections (defaults to /custom/ca) ([#5860](https://github.com/kedacore/keda/issues/5860))
 - **General**: Declarative parsing of scaler config ([#5037](https://github.com/kedacore/keda/issues/5037)|[#5797](https://github.com/kedacore/keda/issues/5797))
+- **General**: Introduce new Splunk Scaler ([#5904](https://github.com/kedacore/keda/issues/5904))
 - **General**: Remove deprecated Kustomize commonLabels ([#5888](https://github.com/kedacore/keda/pull/5888))
 - **General**: Support for Kubernetes v1.30 ([#5828](https://github.com/kedacore/keda/issues/5828))
diff --git a/pkg/scalers/splunk/splunk.go b/pkg/scalers/splunk/splunk.go
new file mode 100644
index 00000000000..0bc61c5e1fe
--- /dev/null
+++ b/pkg/scalers/splunk/splunk.go
@@ -0,0 +1,120 @@
+package splunk
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
+	kedautil "github.com/kedacore/keda/v2/pkg/util"
+)
+
+const (
+	savedSearchPathTemplateStr = "/servicesNS/%s/search/search/jobs/export"
+)
+
+// Config contains the information required to authenticate with a Splunk instance.
+type Config struct {
+	Host        string
+	Username    string
+	Password    string
+	APIToken    string
+	HTTPTimeout time.Duration
+	UnsafeSsl   bool
+}
+
+// Client contains Splunk config information as well as an http client for requests.
+type Client struct {
+	*Config
+	*http.Client
+}
+
+// SearchResponse is used for unmarshalling search results.
+type SearchResponse struct {
+	Result map[string]string `json:"result"`
+}
+
+// NewClient returns a new Splunk client.
+func NewClient(c *Config, sc *scalersconfig.ScalerConfig) (*Client, error) {
+	if c.Username == "" {
+		return nil, errors.New("username was not set")
+	}
+
+	if c.APIToken != "" && c.Password != "" {
+		return nil, errors.New("API token and Password were all set. If APIToken is set, username and password must not be used")
+	}
+
+	httpClient := kedautil.CreateHTTPClient(sc.GlobalHTTPTimeout, c.UnsafeSsl)
+
+	client := &Client{
+		c,
+		httpClient,
+	}
+
+	return client, nil
+}
+
+// SavedSearch fetches the results of a saved search/report in Splunk.
+func (c *Client) SavedSearch(name string) (*SearchResponse, error) {
+	savedSearchAPIPath := fmt.Sprintf(savedSearchPathTemplateStr, c.Username)
+	endpoint := fmt.Sprintf("%s%s", c.Host, savedSearchAPIPath)
+
+	body := strings.NewReader(fmt.Sprintf("search=savedsearch %s", name))
+	req, err := http.NewRequest(http.MethodPost, endpoint, body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+	if c.APIToken != "" {
+		req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", c.APIToken))
+	} else {
+		req.SetBasicAuth(c.Username, c.Password)
+	}
+
+	req.URL.RawQuery = url.Values{
+		"output_mode": {"json"},
+	}.Encode()
+
+	resp, err := c.Client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+
+	if resp.StatusCode > 399 {
+		bodyText, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return nil, err
+		}
+		return nil, errors.New(string(bodyText))
+	}
+
+	result := &SearchResponse{}
+
+	err = json.NewDecoder(resp.Body).Decode(&result)
+
+	return result, err
+}
+
+// ToMetric converts a search response to a consumable metric value.
+func (s *SearchResponse) ToMetric(valueField string) (float64, error) {
+	metricValueStr, ok := s.Result[valueField]
+	if !ok {
+		return 0, fmt.Errorf("field: %s not found in search results", valueField)
+	}
+
+	metricValueInt, err := strconv.ParseFloat(metricValueStr, 64)
+	if err != nil {
+		return 0, fmt.Errorf("value: %s is not a float value", valueField)
+	}
+
+	return metricValueInt, nil
+}
diff --git a/pkg/scalers/splunk/splunk_test.go b/pkg/scalers/splunk/splunk_test.go
new file mode 100644
index 00000000000..6201311b05a
--- /dev/null
+++ b/pkg/scalers/splunk/splunk_test.go
@@ -0,0 +1,303 @@
+package splunk
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
+)
+
+func TestNewClient(t *testing.T) {
+	tests := []struct {
+		name      string
+		config    *Config
+		expectErr bool
+	}{
+		{
+			name: "Valid Basic Auth Config",
+			config: &Config{
+				Username: "fake",
+				Password: "fake",
+			},
+		},
+		{
+			name: "Valid Bearer + Username Auth Config",
+			config: &Config{
+				APIToken: "fake",
+				Username: "fake",
+			},
+		},
+		{
+			name:      "Missing username",
+			config:    &Config{},
+			expectErr: true,
+		},
+		{
+			name: "Invalid Bearer + Password Auth Config",
+			config: &Config{
+				APIToken: "fake",
+				Password: "fake",
+			},
+			expectErr: true,
+		},
+		{
+			name: "UnsafeSsl config",
+			config: &Config{
+				APIToken:  "fake",
+				Username:  "fake",
+				UnsafeSsl: false,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			client, err := NewClient(test.config, &scalersconfig.ScalerConfig{})
+
+			if test.expectErr && err != nil {
+				return
+			}
+
+			if test.expectErr && err == nil {
+				t.Error("Expected error, got nil")
+			}
+
+			if test.config.UnsafeSsl && client.Client.Transport == nil {
+				t.Error("Expected SSL client config to be set, but was nil")
+			}
+		})
+	}
+}
+
+func TestSavedSearch(t *testing.T) {
+	tests := []struct {
+		name            string
+		config          *Config
+		expectErr       bool
+		metricValue     string
+		valueField      string
+		response        SearchResponse
+		savedSearchName string
+		statusCode      int
+	}{
+		{
+			name: "Count - 1",
+			config: &Config{
+				Username: "admin",
+				Password: "password",
+			},
+			metricValue:     "1",
+			valueField:      "count",
+			response:        SearchResponse{Result: map[string]string{"count": "1"}},
+			savedSearchName: "testsearch1",
+			statusCode:      http.StatusOK,
+		},
+		{
+			name: "Count - 100",
+			config: &Config{
+				Username: "admin2",
+				Password: "password2",
+			},
+			metricValue:     "100",
+			valueField:      "count",
+			response:        SearchResponse{Result: map[string]string{"count": "100"}},
+			savedSearchName: "testsearch2",
+			statusCode:      http.StatusOK,
+		},
+		{
+			name: "StatusBadRequest",
+			config: &Config{
+				Username: "admin",
+				Password: "password",
+			},
+			expectErr:       true,
+			response:        SearchResponse{Result: map[string]string{}},
+			savedSearchName: "testsearch4",
+			statusCode:      http.StatusBadRequest,
+		},
+		{
+			name: "StatusForbidden",
+			config: &Config{
+				Username: "admin",
+				Password: "password",
+			},
+			expectErr:       true,
+			response:        SearchResponse{Result: map[string]string{}},
+			savedSearchName: "testsearch5",
+			statusCode:      http.StatusForbidden,
+		},
+		{
+			name: "Validate Bearer Token",
+			config: &Config{
+				APIToken: "sometoken",
+				Username: "fake",
+			},
+			expectErr:       true,
+			response:        SearchResponse{Result: map[string]string{}},
+			savedSearchName: "testsearch5",
+			statusCode:      http.StatusForbidden,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				expectedReqPath := fmt.Sprintf(savedSearchPathTemplateStr, test.config.Username)
+				if r.URL.Path != fmt.Sprintf(savedSearchPathTemplateStr, test.config.Username) {
+					t.Errorf("Expected request path '%s', got: %s", expectedReqPath, r.URL.Path)
+				}
+
+				err := r.ParseForm()
+				if err != nil {
+					t.Errorf("Expected no error parsing form data, but got '%s'", err.Error())
+				}
+
+				searchFormData := r.FormValue("search")
+				if searchFormData != fmt.Sprintf("savedsearch %s", test.savedSearchName) {
+					t.Errorf("Expected form data to be 'savedsearch %s' '%s'", test.savedSearchName, searchFormData)
+				}
+
+				q, err := url.ParseQuery(r.URL.RawQuery)
+				if err != nil {
+					t.Errorf("Expected query parsing err to be nil, got %s", err.Error())
+				}
+
+				outputMode := q.Get("output_mode")
+				if outputMode != "json" {
+					t.Errorf("Expected output_mode query string to be '%s', got: %s", "json", outputMode)
+				}
+
+				// Bearer token auth
+				if test.config.APIToken != "" {
+					actual := r.Header.Get("Authorization")
+					expected := fmt.Sprintf("Bearer %s", test.config.APIToken)
+					if actual != expected {
+						t.Errorf("APIToken is set. Expected Authorization header to be '%s', got: %s", actual, expected)
+					}
+				} else {
+					// Basic auth
+					reqUsername, reqPassword, ok := r.BasicAuth()
+					if !ok {
+						t.Error("Expected basic auth to be set, but was not")
+					}
+					if test.config.Username != reqUsername {
+						t.Errorf("Expected request username to be '%s', got: %s", test.config.Username, reqUsername)
+					}
+					if test.config.Password != reqPassword {
+						t.Errorf("Expected request password to be '%s', got: %s", test.config.Password, reqPassword)
+					}
+				}
+
+				w.WriteHeader(test.statusCode)
+				w.Header().Set("Content-Type", "application/json")
+				err = json.NewEncoder(w).Encode(test.response)
+				if err != nil {
+					http.Error(w, fmt.Sprintf("error building the response, %v", err), http.StatusInternalServerError)
+					return
+				}
+			}))
+			defer server.Close()
+
+			test.config.Host = server.URL
+			s, err := NewClient(test.config, &scalersconfig.ScalerConfig{})
+			if err != nil {
+				t.Errorf("Expected err to be nil, got %s", err.Error())
+			}
+
+			splunkResponse, err := s.SavedSearch(test.savedSearchName)
+
+			if test.expectErr && err != nil {
+				return
+			}
+
+			if test.expectErr && err == nil {
+				t.Error("Expected error, got nil")
+			}
+
+			if err != nil {
+				t.Errorf("Expected err to be nil, got %s", err.Error())
+			}
+
+			v, ok := splunkResponse.Result[test.valueField]
+			if !ok {
+				t.Errorf("Expected value field to be %s to exist but did not", test.valueField)
+			}
+
+			if v != test.metricValue {
+				t.Errorf("Expected metric value to be %s, got %s", test.metricValue, v)
+			}
+		})
+	}
+}
+
+func TestToMetric(t *testing.T) {
+	tests := []struct {
+		name                string
+		expectErr           bool
+		expectedMetricValue float64
+		response            *SearchResponse
+		valueField          string
+	}{
+		{
+			name:                "Successful metric conversion - 1",
+			expectedMetricValue: 1.000000,
+			response: &SearchResponse{
+				Result: map[string]string{
+					"count": "1",
+				},
+			},
+			valueField: "count",
+		},
+		{
+			name:                "Successful metric conversion - 100",
+			expectedMetricValue: 100.000000,
+			response: &SearchResponse{
+				Result: map[string]string{
+					"count": "100",
+				},
+			},
+			valueField: "count",
+		},
+		{
+			name:      "Failed metric type conversion",
+			expectErr: true,
+			response: &SearchResponse{
+				Result: map[string]string{
+					"count": "A",
+				},
+			},
+			valueField: "count",
+		},
+		{
+			name:      "Value field not found",
+			expectErr: true,
+			response: &SearchResponse{
+				Result: map[string]string{
+					"fake": "1",
+				},
+			},
+			valueField: "count",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			metric, err := test.response.ToMetric(test.valueField)
+			if test.expectErr && err != nil {
+				return
+			}
+
+			if test.expectErr && err == nil {
+				t.Error("Expected error, got nil")
+			}
+
+			if test.expectedMetricValue != metric {
+				t.Errorf("Expected metric value '%f', got: %f", test.expectedMetricValue, metric)
+			}
+		})
+	}
+}
diff --git a/pkg/scalers/splunk_scaler.go b/pkg/scalers/splunk_scaler.go
new file mode 100644
index 00000000000..d11586df88d
--- /dev/null
+++ b/pkg/scalers/splunk_scaler.go
@@ -0,0 +1,118 @@
+package scalers
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/go-logr/logr"
+	v2 "k8s.io/api/autoscaling/v2"
+	"k8s.io/metrics/pkg/apis/external_metrics"
+
+	"github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
+	"github.com/kedacore/keda/v2/pkg/scalers/splunk"
+	kedautil "github.com/kedacore/keda/v2/pkg/util"
+)
+
+// SplunkScaler assigns struct data pointer to metadata variable
+type SplunkScaler struct {
+	client     *splunk.Client
+	metricType v2.MetricTargetType
+	metadata   SplunkMetadata
+	logger     logr.Logger
+}
+
+// SplunkMetadata Metadata used by KEDA to search Splunk events and scale
+type SplunkMetadata struct {
+	APIToken        string `keda:"name=apiToken, order=authParams, optional"`
+	Password        string `keda:"name=password, order=authParams, optional"`
+	Username        string `keda:"name=username, order=authParams"`
+	Host            string `keda:"name=host, order=triggerMetadata"`
+	UnsafeSsl       bool   `keda:"name=unsafeSsl, order=triggerMetadata, optional"`
+	TargetValue     int    `keda:"name=targetValue, order=triggerMetadata"`
+	ActivationValue int    `keda:"name=activationValue, order=triggerMetadata"`
+	SavedSearchName string `keda:"name=savedSearchName, order=triggerMetadata"`
+	ValueField      string `keda:"name=valueField, order=triggerMetadata"`
+	triggerIndex    int
+}
+
+func NewSplunkScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {
+	metricType, err := GetMetricTargetType(config)
+	if err != nil {
+		return nil, fmt.Errorf("error getting scaler metric type: %w", err)
+	}
+
+	meta, err := parseSplunkMetadata(config)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing Splunk metadata: %w", err)
+	}
+
+	client, err := splunk.NewClient(&splunk.Config{
+		APIToken:  meta.APIToken,
+		Password:  meta.Password,
+		Username:  meta.Username,
+		Host:      meta.Host,
+		UnsafeSsl: meta.UnsafeSsl,
+	}, config)
+	if err != nil {
+		return nil, err
+	}
+
+	return &SplunkScaler{
+		client:     client,
+		metricType: metricType,
+		logger:     InitializeLogger(config, "splunk_scaler"),
+		metadata:   *meta,
+	}, nil
+}
+
+func (s *SplunkScaler) Close(context.Context) error {
+	return nil
+}
+
+func parseSplunkMetadata(config *scalersconfig.ScalerConfig) (*SplunkMetadata, error) {
+	meta := &SplunkMetadata{}
+	meta.triggerIndex = config.TriggerIndex
+	if err := config.TypedConfig(meta); err != nil {
+		return nil, fmt.Errorf("error parsing splunk metadata: %w", err)
+	}
+
+	_, err := url.ParseRequestURI(meta.Host)
+	if err != nil {
+		return meta, errors.New("invalid value for host. Must be a valid URL such as https://localhost:8089")
+	}
+
+	return meta, nil
+}
+
+// GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler
+func (s *SplunkScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
+	externalMetric := &v2.ExternalMetricSource{
+		Metric: v2.MetricIdentifier{
+			Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("splunk-%s", s.metadata.SavedSearchName))),
+		},
+		Target: GetMetricTarget(s.metricType, int64(s.metadata.TargetValue)),
+	}
+	metricSpec := v2.MetricSpec{
+		External: externalMetric,
+		Type:     externalMetricType,
+	}
+	return []v2.MetricSpec{metricSpec}
+}
+
+// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
+func (s *SplunkScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
+	response, err := s.client.SavedSearch(s.metadata.SavedSearchName)
+	if err != nil {
+		return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error fetching saved search data from splunk: %w", err)
+	}
+
+	metricValue, err := response.ToMetric(s.metadata.ValueField)
+	if err != nil {
+		return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error finding metric value field: %w", err)
+	}
+
+	metric := GenerateMetricInMili(metricName, metricValue)
+	return []external_metrics.ExternalMetricValue{metric}, int(metricValue) > s.metadata.ActivationValue, nil
+}
diff --git a/pkg/scalers/splunk_scaler_test.go b/pkg/scalers/splunk_scaler_test.go
new file mode 100644
index 00000000000..17e2409cc3c
--- /dev/null
+++ b/pkg/scalers/splunk_scaler_test.go
@@ -0,0 +1,95 @@
+package scalers
+
+import (
+	"context"
+	"testing"
+
+	"github.com/kedacore/keda/v2/pkg/scalers/scalersconfig"
+)
+
+type parseSplunkMetadataTestData struct {
+	metadata   map[string]string
+	authParams map[string]string
+	isError    bool
+}
+
+type SplunkMetricIdentifier struct {
+	metadataTestData *parseSplunkMetadataTestData
+	triggerIndex     int
+	name             string
+}
+
+var validSplunkAuthParams = map[string]string{
+	"username": "fake",
+}
+
+var validSplunkMetadata = map[string]string{
+	"host":            "https://localhost:8089",
+	"unsafeSsl":       "false",
+	"targetValue":     "1",
+	"activationValue": "5",
+	"savedSearchName": "fakeSavedSearchName",
+	"valueField":      "count",
+}
+
+var testSplunkMetadata = []parseSplunkMetadataTestData{
+	// Valid metadata for api token auth, pass.
+	{validSplunkMetadata, map[string]string{"username": "fake", "apiToken": "fake"}, false},
+	// Valid metadata for basic auth, pass.
+	{validSplunkMetadata, map[string]string{"username": "fake", "password": "fake"}, false},
+	// No params, missing username, fail.
+	{map[string]string{}, map[string]string{}, true},
+	// No params, missing host, fail.
+	{map[string]string{}, validSplunkAuthParams, true},
+	// Invalid host, fail.
+	{map[string]string{"host": "missinghttpURIScheme:8089"}, validSplunkAuthParams, true},
+	// Invalid unsafeSsl value, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "invalid"}, validSplunkAuthParams, true},
+	// Missing targetValue, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false"}, validSplunkAuthParams, true},
+	// Invalid targetValue, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false", "targetValue": "invalid"}, validSplunkAuthParams, true},
+	// Missing activationValue, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false", "targetValue": "1"}, validSplunkAuthParams, true},
+	// Invalid activationValue, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false", "targetValue": "1", "activationValue": "invalid"}, validSplunkAuthParams, true},
+	// Missing savedSearchName, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false", "targetValue": "1", "activationValue": "5"}, validSplunkAuthParams, true},
+	// Missing valueField, fail.
+	{map[string]string{"host": "https://localhost:8089", "unsafeSsl": "false", "targetValue": "1", "activationValue": "5", "savedSearchName": "fakeSavedSearchName"}, validSplunkAuthParams, true},
+}
+
+var SplunkMetricIdentifiers = []SplunkMetricIdentifier{
+	{&testSplunkMetadata[0], 0, "s0-splunk-fakeSavedSearchName"},
+	{&testSplunkMetadata[0], 1, "s1-splunk-fakeSavedSearchName"},
+}
+
+func TestSplunkParseMetadata(t *testing.T) {
+	for _, testData := range testSplunkMetadata {
+		_, err := parseSplunkMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams})
+		if err != nil && !testData.isError {
+			t.Error("Expected success but got error", err)
+		} else if testData.isError && err == nil {
+			t.Error("Expected error but got success")
+		}
+	}
+}
+
+func TestSplunkGetMetricSpecForScaling(t *testing.T) {
+	for _, testData := range SplunkMetricIdentifiers {
+		ctx := context.Background()
+		meta, err := parseSplunkMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validSplunkAuthParams, TriggerIndex: testData.triggerIndex})
+		if err != nil {
+			t.Fatal("Could not parse metadata:", err)
+		}
+		mockSplunkScaler := SplunkScaler{
+			metadata: *meta,
+		}
+
+		metricSpec := mockSplunkScaler.GetMetricSpecForScaling(ctx)
+		metricName := metricSpec[0].External.Metric.Name
+		if metricName != testData.name {
+			t.Error("Wrong External metric source name:", metricName)
+		}
+	}
+}
diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go
index 889871fce88..7f6071d54f9 100644
--- a/pkg/scaling/scalers_builder.go
+++ b/pkg/scaling/scalers_builder.go
@@ -245,6 +245,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string,
 		return scalers.NewSolaceScaler(config)
 	case "solr":
 		return scalers.NewSolrScaler(config)
+	case "splunk":
+		return scalers.NewSplunkScaler(config)
 	case "stan":
 		return scalers.NewStanScaler(config)
 	default:
diff --git a/tests/scalers/splunk/splunk_test.go b/tests/scalers/splunk/splunk_test.go
new file mode 100644
index 00000000000..5fd2d3bf417
--- /dev/null
+++ b/tests/scalers/splunk/splunk_test.go
@@ -0,0 +1,317 @@
+//go:build e2e
+// +build e2e
+
+package splunk_test
+
+import (
+	"encoding/base64"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"k8s.io/client-go/kubernetes"
+
+	. "github.com/kedacore/keda/v2/tests/helper"
+)
+
+const (
+	testName = "splunk-test"
+)
+
+var (
+	testNamespace          = fmt.Sprintf("%s-ns", testName)
+	configMapName          = fmt.Sprintf("%s-configmap", testName)
+	deploymentName         = fmt.Sprintf("%s-deployment", testName)
+	scaledObjectName       = fmt.Sprintf("%s-so", testName)
+	secretName             = fmt.Sprintf("%s-secret", testName)
+	username               = "admin"
+	password               = "password"
+	savedSearchName        = "e2eSavedSearch"
+	apiPort                = 8089
+	maxReplicaCount        = 2
+	minReplicaCount        = 0
+	scaleInTargetValue     = "10"
+	scaleInActivationValue = "15"
+)
+
+type templateData struct {
+	TestNamespace        string
+	ConfigMapName        string
+	DeploymentName       string
+	ScaledObjectName     string
+	SecretName           string
+	SplunkUsername       string
+	SplunkUsernameBase64 string
+	SplunkPassword       string
+	SplunkPasswordBase64 string
+	SavedSearchName      string
+	APIPort              int
+	MinReplicaCount      string
+	MaxReplicaCount      string
+	// Preconfigured saved search returns a static value of 10
+	// so we need to change the scaled object values at different phases to test scale in + out
+	TargetValue     string
+	ActivationValue string
+}
+
+const (
+	secretTemplate = `
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{.SecretName}}
+  namespace: {{.TestNamespace}}
+data:
+  username: {{.SplunkUsernameBase64}}
+  password: {{.SplunkPasswordBase64}}
+`
+
+	configMapTemplate = `
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{.ConfigMapName}}
+  namespace: {{.TestNamespace}}
+data:
+  default.yml: |
+    splunk:
+      conf:
+        - key: savedsearches
+          value:
+            directory: /opt/splunk/etc/users/admin/search/local
+            content:
+              {{.SavedSearchName}}:
+                action.email.useNSSubject: 1
+                action.webhook.enable_allowlist: 0
+                alert.track: 0
+                cron_schedule: '*/1 * * * *'
+                dispatch.earliest_time: -15m
+                dispatch.latest_time: now
+                display.general.type: statistics
+                display.page.search.tab: statistics
+                display.visualizations.show: 0
+                enableSched: 1
+                request.ui_dispatch_app: search
+                request.ui_dispatch_view: search
+                search: index=_internal | tail | stats count
+`
+
+	triggerAuthenticationTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+  name: keda-trigger-auth-splunk-secret
+  namespace: {{.TestNamespace}}
+spec:
+  secretTargetRef:
+    - parameter: username
+      name: {{.SecretName}}
+      key: username
+    - parameter: password
+      name: {{.SecretName}}
+      key: password
+`
+
+	deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{.DeploymentName}}
+  namespace: {{.TestNamespace}}
+  labels:
+    app: {{.DeploymentName}}
+spec:
+  replicas: 0
+  selector:
+    matchLabels:
+      app: {{.DeploymentName}}
+  template:
+    metadata:
+      labels:
+        app: {{.DeploymentName}}
+    spec:
+      containers:
+        - name: nginx
+          image: nginxinc/nginx-unprivileged
+          ports:
+            - containerPort: 80
+`
+	splunkDeploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: splunk
+  namespace: {{.TestNamespace}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: splunk
+  template:
+    metadata:
+      labels:
+        name: splunk
+    spec:
+      containers:
+        - name: splunk
+          image: splunk/splunk:9.2
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: SPLUNK_START_ARGS
+              value: --accept-license
+            - name: SPLUNK_PASSWORD
+              value: {{.SplunkPassword}}
+          ports:
+            - containerPort: {{.APIPort}}
+              name: api
+              protocol: TCP
+          volumeMounts:
+            - name: splunkconf-volume
+              mountPath: /tmp/defaults
+      volumes:
+        - name: splunkconf-volume
+          configMap:
+            name: {{.ConfigMapName}}
+`
+
+	serviceTemplate = `apiVersion: v1
+kind: Service
+metadata:
+  name: {{.DeploymentName}}
+  namespace: {{.TestNamespace}}
+spec:
+  type: ClusterIP
+  ports:
+    - name: api
+      port: {{.APIPort}}
+      targetPort: {{.APIPort}}
+      protocol: TCP
+  selector:
+    name: splunk
+`
+
+	scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{.ScaledObjectName}}
+  namespace: {{.TestNamespace}}
+  labels:
+    app: {{.DeploymentName}}
+spec:
+  scaleTargetRef:
+    name: {{.DeploymentName}}
+  minReplicaCount: {{.MinReplicaCount}}
+  maxReplicaCount: {{.MaxReplicaCount}}
+  pollingInterval: 3
+  cooldownPeriod: 1
+  triggers:
+    - type: splunk
+      metadata:
+        host: "https://{{.DeploymentName}}.{{.TestNamespace}}.svc:{{.APIPort}}"
+        username: {{.SplunkUsername}}
+        unsafeSsl: "true"
+        targetValue: "{{.TargetValue}}"
+        activationValue: "{{.ActivationValue}}"
+        savedSearchName: {{.SavedSearchName}}
+        valueField: count
+      authenticationRef:
+        name: keda-trigger-auth-splunk-secret
+`
+)
+
+func TestSplunkScaler(t *testing.T) {
+	kc := GetKubernetesClient(t)
+	data, templates := getTemplateData()
+	t.Cleanup(func() {
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+	})
+
+	// Create kubernetes resources
+	CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+	// Wait for splunk to start
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, "splunk", testNamespace, 1, 180, 3),
+		"replica count should be %d after 3 minutes", 1)
+
+	// Ensure nginx deployment is at min replica count
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 3),
+		"replica count should be %d after 3 minutes", minReplicaCount)
+
+	// test scaling
+	testActivation(t, kc)
+	testScaleOut(t, kc)
+	testScaleIn(t, kc)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing activation ---")
+
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale out ---")
+
+	// Saved Search returns 10, let's change the scaled object resource to force scaling out
+	data := getScaledObjectTemplateData("1", "9")
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplateToScaleOut", scaledObjectTemplate)
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale in ---")
+
+	// Saved Search returns 10, let's change the scaled object resource to force scaling in
+	data := getScaledObjectTemplateData(scaleInTargetValue, scaleInActivationValue)
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplateToScaleIn", scaledObjectTemplate)
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", minReplicaCount)
+}
+
+func getTemplateData() (templateData, []Template) {
+	return templateData{
+		TestNamespace:        testNamespace,
+		ConfigMapName:        configMapName,
+		DeploymentName:       deploymentName,
+		ScaledObjectName:     scaledObjectName,
+		SecretName:           secretName,
+		SplunkUsername:       username,
+		SplunkUsernameBase64: base64.StdEncoding.EncodeToString([]byte(username)),
+		SplunkPassword:       password,
+		SplunkPasswordBase64: base64.StdEncoding.EncodeToString([]byte(password)),
+		SavedSearchName:      savedSearchName,
+		APIPort:              apiPort,
+		MinReplicaCount:      fmt.Sprintf("%v", minReplicaCount),
+		MaxReplicaCount:      fmt.Sprintf("%v", maxReplicaCount),
+		// Ensure no scaling out since saved search returns 10 by default
+		TargetValue:     scaleInTargetValue,
+		ActivationValue: scaleInActivationValue,
+	}, []Template{
+		{Name: "secretTemplate", Config: secretTemplate},
+		{Name: "configMapTemplate", Config: configMapTemplate},
+		{Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+		{Name: "serviceTemplate", Config: serviceTemplate},
+		{Name: "splunkDeploymentTemplate", Config: splunkDeploymentTemplate},
+		{Name: "deploymentTemplate", Config: deploymentTemplate},
+		{Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+	}
+}
+
+func getScaledObjectTemplateData(targetValue, activationValue string) templateData {
+	return templateData{
+		TestNamespace:    testNamespace,
+		DeploymentName:   deploymentName,
+		ScaledObjectName: scaledObjectName,
+		SplunkUsername:   username,
+		SavedSearchName:  savedSearchName,
+		APIPort:          apiPort,
+		MinReplicaCount:  fmt.Sprintf("%v", minReplicaCount),
+		MaxReplicaCount:  fmt.Sprintf("%v", maxReplicaCount),
+		TargetValue:      targetValue,
+		ActivationValue:  activationValue,
+	}
+}

From d7ee1b5fcf1a1e43a44d6b1770acce48cb72d36a Mon Sep 17 00:00:00 2001
From: Zbynek Roubalik
Date: Wed, 3 Jul 2024 07:36:29 +0200
Subject: [PATCH 05/37] Update estimated release dates (#5942)

Signed-off-by: Zbynek Roubalik
Signed-off-by: novoselov
---
 ROADMAP.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ROADMAP.md b/ROADMAP.md
index 46402080467..fff07da1063 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -14,9 +14,9 @@ Here is an overview of our current release estimations:
 
 | Version | Estimated Release Date                                |
 |:--------|:------------------------------------------------------|
-| v2.15   | July 9th, 2024                                        |
-| v2.16   | Oct 3rd, 2024                                         |
-| v2.17   | Jan 9th, 2025                                         |
+| v2.15   | July 30th, 2024                                       |
+| v2.16   | Oct 30th, 2024                                        |
+| v2.17   | Jan 30th, 2025                                        |
 
 Here is an overview of our previous releases:

From d60b3719a6a64c8ae39fa1de71d52064c66c0753 Mon Sep 17 00:00:00 2001
From: Joel Smith
Date: Mon, 8 Jul 2024 05:42:18 -0600
Subject: [PATCH 06/37] Update github.com/hashicorp/go-retryablehttp for CVE-2024-6104 (#5945)

Signed-off-by: novoselov
---
 CHANGELOG.md                                  |   1 +
 go.mod                                        |   3 +-
 go.sum                                        |  11 +-
 .../hashicorp/go-retryablehttp/.go-version    |   1 +
 .../hashicorp/go-retryablehttp/CHANGELOG.md   |  26 +++-
 .../hashicorp/go-retryablehttp/CODEOWNERS     |   2 +-
 .../hashicorp/go-retryablehttp/Makefile       |   2 +-
 .../hashicorp/go-retryablehttp/README.md      |   2 +-
 .../go-retryablehttp/cert_error_go119.go      |  14 ++
 .../go-retryablehttp/cert_error_go120.go      |  14 ++
 .../hashicorp/go-retryablehttp/client.go      | 132 ++++++++++++++----
 vendor/modules.txt                            |   6 +-
 12 files changed, 164 insertions(+), 50 deletions(-)
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.go-version
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go
 create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a1992488e8..fab886aea19 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -83,6 +83,7 @@ Here is an overview of all new **experimental** features:
 
 ### Fixes
 
 - **General**: Check for missing CRD references and sample CRs ([#5920](https://github.com/kedacore/keda/issues/5920))
 - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
 - **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760))
+- **New Relic Scaler**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5944](https://github.com/kedacore/keda/issues/5944))
 
 ### Deprecations
diff --git a/go.mod b/go.mod
index b9b95dcf704..56b5205cd04 100644
--- a/go.mod
+++ b/go.mod
@@ -260,9 +260,8 @@ require (
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-hclog v1.6.2 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
 	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
 	github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect
 	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
diff --git a/go.sum b/go.sum
index 52b6356ba09..e7c5977bb51 100644
--- a/go.sum
+++ b/go.sum
@@ -1930,16 +1930,15 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
 github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I=
-github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
-github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
+github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -2076,7 +2075,6 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc
 github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
@@ -2289,7 +2287,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.go-version b/vendor/github.com/hashicorp/go-retryablehttp/.go-version
new file mode 100644
index 00000000000..6fee2fedb0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/.go-version
@@ -0,0 +1 @@
+1.22.2
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
index 7a17b9f9930..68a627c6d97 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CHANGELOG.md
@@ -1,14 +1,32 @@
+## 0.7.7 (May 30, 2024)
+
+BUG FIXES:
+
+- client: avoid potentially leaking URL-embedded basic authentication credentials in logs (#158)
+
+## 0.7.6 (May 9, 2024)
+
+ENHANCEMENTS:
+
+- client: support a `RetryPrepare` function for modifying the request before retrying (#216)
+- client: support HTTP-date values for `Retry-After` header value (#138)
+- client: avoid reading entire body when the body is a `*bytes.Reader` (#197)
+
+BUG FIXES:
+
+- client: fix a broken check for invalid server certificate in go 1.20+ (#210)
+
 ## 0.7.5 (Nov 8, 2023)
 
-BUG FIXES
+BUG FIXES:
 
-- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections [GH-207]
+- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections (#207)
 
 ## 0.7.4 (Jun 6, 2023)
 
-BUG FIXES
+BUG FIXES:
 
-- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 [GH-194]
+- client: fixing an issue where the Content-Type header wouldn't be sent with an empty payload when using HTTP/2 (#194)
 
 ## 0.7.3 (May 15, 2023)
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
index f8389c995e6..d6dd78a2dd9 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
+++ b/vendor/github.com/hashicorp/go-retryablehttp/CODEOWNERS
@@ -1 +1 @@
-* @hashicorp/release-engineering
\ No newline at end of file
+* @hashicorp/go-retryablehttp-maintainers
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
index da17640e644..5255241961e 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/Makefile
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -2,7 +2,7 @@ default: test
 
 test:
 	go vet ./...
-	go test -race ./...
+	go test -v -race ./...
 
 updatedeps:
 	go get -f -t -u ./...
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
index 8943becf19b..145a62f218c 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/README.md
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -59,4 +59,4 @@ standardClient := retryClient.StandardClient() // *http.Client
 ```
 
 For more usage and examples see the
-[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
+[pkg.go.dev](https://pkg.go.dev/github.com/hashicorp/go-retryablehttp).
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go
new file mode 100644
index 00000000000..b2b27e87225
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go119.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !go1.20
+// +build !go1.20
+
+package retryablehttp
+
+import "crypto/x509"
+
+func isCertError(err error) bool {
+	_, ok := err.(x509.UnknownAuthorityError)
+	return ok
+}
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go
new file mode 100644
index 00000000000..a3cd315a28b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/cert_error_go120.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build go1.20
+// +build go1.20
+
+package retryablehttp
+
+import "crypto/tls"
+
+func isCertError(err error) bool {
+	_, ok := err.(*tls.CertificateVerificationError)
+	return ok
+}
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
index c9edbd0595b..efee53c400d 100644
--- a/vendor/github.com/hashicorp/go-retryablehttp/client.go
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -27,10 +27,8 @@ package retryablehttp
 import (
 	"bytes"
 	"context"
-	"crypto/x509"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"math"
 	"math/rand"
@@ -63,6 +61,10 @@ var (
 	// limit the size we consume to respReadLimit.
 	respReadLimit = int64(4096)
 
+	// timeNow sets the function that returns the current time.
+	// This defaults to time.Now. Changes to this should only be done in tests.
+	timeNow = time.Now
+
 	// A regular expression to match the error returned by net/http when the
 	// configured number of redirects is exhausted. This error isn't typed
 	// specifically so we resort to matching on the error string.
@@ -73,6 +75,11 @@ var (
 	// specifically so we resort to matching on the error string.
 	schemeErrorRe = regexp.MustCompile(`unsupported protocol scheme`)
 
+	// A regular expression to match the error returned by net/http when a
+	// request header or value is invalid. This error isn't typed
+	// specifically so we resort to matching on the error string.
+	invalidHeaderErrorRe = regexp.MustCompile(`invalid header`)
+
 	// A regular expression to match the error returned by net/http when the
 	// TLS certificate is not trusted. This error isn't typed
 	// specifically so we resort to matching on the error string.
@@ -248,21 +255,19 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro
 	// deal with it seeking so want it to match here instead of the
 	// io.ReadSeeker case.
 	case *bytes.Reader:
-		buf, err := ioutil.ReadAll(body)
-		if err != nil {
-			return nil, 0, err
-		}
+		snapshot := *body
 		bodyReader = func() (io.Reader, error) {
-			return bytes.NewReader(buf), nil
+			r := snapshot
+			return &r, nil
 		}
-		contentLength = int64(len(buf))
+		contentLength = int64(body.Len())
 
 	// Compat case
 	case io.ReadSeeker:
 		raw := body
 		bodyReader = func() (io.Reader, error) {
 			_, err := raw.Seek(0, 0)
-			return ioutil.NopCloser(raw), err
+			return io.NopCloser(raw), err
 		}
 		if lr, ok := raw.(LenReader); ok {
 			contentLength = int64(lr.Len())
@@ -270,7 +275,7 @@ func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, erro
 
 	// Read all in so we can reset
 	case io.Reader:
-		buf, err := ioutil.ReadAll(body)
+		buf, err := io.ReadAll(body)
 		if err != nil {
 			return nil, 0, err
 		}
@@ -393,6 +398,9 @@ type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) t
 // attempted. If overriding this, be sure to close the body if needed.
 type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
 
+// PrepareRetry is called before retry operation. It can be used for example to re-sign the request
+type PrepareRetry func(req *http.Request) error
+
 // Client is used to make HTTP requests. It adds additional functionality
 // like automatic retries to tolerate minor outages.
 type Client struct {
@@ -421,6 +429,9 @@ type Client struct {
 	// ErrorHandler specifies the custom error handler to use, if any
 	ErrorHandler ErrorHandler
 
+	// PrepareRetry can prepare the request for retry operation, for example re-sign it
+	PrepareRetry PrepareRetry
+
 	loggerInit sync.Once
 	clientInit sync.Once
 }
@@ -494,11 +505,16 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
 			return false, v
 		}
 
+		// Don't retry if the error was due to an invalid header.
+		if invalidHeaderErrorRe.MatchString(v.Error()) {
+			return false, v
+		}
+
 		// Don't retry if the error was due to TLS cert verification failure.
 		if notTrustedErrorRe.MatchString(v.Error()) {
 			return false, v
 		}
-		if _, ok := v.Err.(x509.UnknownAuthorityError); ok {
+		if isCertError(v.Err) {
 			return false, v
 		}
 	}
@@ -535,10 +551,8 @@ func baseRetryPolicy(resp *http.Response, err error) (bool, error) {
 func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
 	if resp != nil {
 		if resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable {
-			if s, ok := resp.Header["Retry-After"]; ok {
-				if sleep, err := strconv.ParseInt(s[0], 10, 64); err == nil {
-					return time.Second * time.Duration(sleep)
-				}
+			if sleep, ok := parseRetryAfterHeader(resp.Header["Retry-After"]); ok {
+				return sleep
 			}
 		}
 	}
@@ -551,6 +565,41 @@ func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response)
 	return sleep
 }
 
+// parseRetryAfterHeader parses the Retry-After header and returns the
+// delay duration according to the spec: https://httpwg.org/specs/rfc7231.html#header.retry-after
+// The bool returned will be true if the header was successfully parsed.
+// Otherwise, the header was either not present, or was not parseable according to the spec.
+//
+// Retry-After headers come in two flavors: Seconds or HTTP-Date
+//
+// Examples:
+// * Retry-After: Fri, 31 Dec 1999 23:59:59 GMT
+// * Retry-After: 120
+func parseRetryAfterHeader(headers []string) (time.Duration, bool) {
+	if len(headers) == 0 || headers[0] == "" {
+		return 0, false
+	}
+	header := headers[0]
+	// Retry-After: 120
+	if sleep, err := strconv.ParseInt(header, 10, 64); err == nil {
+		if sleep < 0 { // a negative sleep doesn't make sense
+			return 0, false
+		}
+		return time.Second * time.Duration(sleep), true
+	}
+
+	// Retry-After: Fri, 31 Dec 1999 23:59:59 GMT
+	retryTime, err := time.Parse(time.RFC1123, header)
+	if err != nil {
+		return 0, false
+	}
+	if until := retryTime.Sub(timeNow()); until > 0 {
+		return until, true
+	}
+	// date is in the past
+	return 0, true
+}
+
 // LinearJitterBackoff provides a callback for Client.Backoff which will
 // perform linear backoff based on the attempt number and with jitter to
 // prevent a thundering herd.
@@ -578,13 +627,13 @@ func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Resp
 	}
 
 	// Seed rand; doing this every time is fine
-	rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+	source := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
 
 	// Pick a random number that lies somewhere between the min and max and
 	// multiply by the attemptNum. attemptNum starts at zero so we always
 	// increment here. We first get a random percentage, then apply that to the
 	// difference between min and max, and add to min.
-	jitter := rand.Float64() * float64(max-min)
+	jitter := source.Float64() * float64(max-min)
 	jitterMin := int64(jitter) + int64(min)
 	return time.Duration(jitterMin * int64(attemptNum))
 }
@@ -609,19 +658,19 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 	if logger != nil {
 		switch v := logger.(type) {
 		case LeveledLogger:
-			v.Debug("performing request", "method", req.Method, "url", req.URL)
+			v.Debug("performing request", "method", req.Method, "url", redactURL(req.URL))
 		case Logger:
-			v.Printf("[DEBUG] %s %s", req.Method, req.URL)
+			v.Printf("[DEBUG] %s %s", req.Method, redactURL(req.URL))
 		}
 	}
 
 	var resp *http.Response
 	var attempt int
 	var shouldRetry bool
-	var doErr, respErr, checkErr error
+	var doErr, respErr, checkErr, prepareErr error
 
 	for i := 0; ; i++ {
-		doErr, respErr = nil, nil
+		doErr, respErr, prepareErr = nil, nil, nil
 		attempt++
 
 		// Always rewind the request body when non-nil.
@@ -634,7 +683,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if c, ok := body.(io.ReadCloser); ok { req.Body = c } else { - req.Body = ioutil.NopCloser(body) + req.Body = io.NopCloser(body) } } @@ -666,9 +715,9 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if err != nil { switch v := logger.(type) { case LeveledLogger: - v.Error("request failed", "error", err, "method", req.Method, "url", req.URL) + v.Error("request failed", "error", err, "method", req.Method, "url", redactURL(req.URL)) case Logger: - v.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + v.Printf("[ERR] %s %s request failed: %v", req.Method, redactURL(req.URL), err) } } else { // Call this here to maintain the behavior of logging all requests, @@ -704,7 +753,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) if logger != nil { - desc := fmt.Sprintf("%s %s", req.Method, req.URL) + desc := fmt.Sprintf("%s %s", req.Method, redactURL(req.URL)) if resp != nil { desc = fmt.Sprintf("%s (status: %d)", desc, resp.StatusCode) } @@ -728,17 +777,26 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // without racing against the closeBody call in persistConn.writeLoop. httpreq := *req.Request req.Request = &httpreq + + if c.PrepareRetry != nil { + if err := c.PrepareRetry(req.Request); err != nil { + prepareErr = err + break + } + } } // this is the closest we have to success criteria - if doErr == nil && respErr == nil && checkErr == nil && !shouldRetry { + if doErr == nil && respErr == nil && checkErr == nil && prepareErr == nil && !shouldRetry { return resp, nil } defer c.HTTPClient.CloseIdleConnections() var err error - if checkErr != nil { + if prepareErr != nil { + err = prepareErr + } else if checkErr != nil { err = checkErr } else if respErr != nil { err = respErr @@ -760,17 +818,17 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // communicate why if err == nil { return nil, fmt.Errorf("%s %s giving up after %d attempt(s)", - req.Method, req.URL, attempt) + req.Method, redactURL(req.URL), attempt) } return nil, fmt.Errorf("%s %s giving up after %d attempt(s): %w", - req.Method, req.URL, attempt, err) + req.Method, redactURL(req.URL), attempt, err) } // Try to read the response body so we can reuse this connection. func (c *Client) drainBody(body io.ReadCloser) { defer body.Close() - _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + _, err := io.Copy(io.Discard, io.LimitReader(body, respReadLimit)) if err != nil { if c.logger() != nil { switch v := c.logger().(type) { @@ -845,3 +903,17 @@ func (c *Client) StandardClient() *http.Client { Transport: &RoundTripper{Client: c}, } } + +// Taken from url.URL#Redacted() which was introduced in go 1.15. +// We can switch to using it directly if we'll bump the minimum required go version. 
+func redactURL(u *url.URL) string { + if u == nil { + return "" + } + + ru := *u + if _, has := ru.User.Password(); has { + ru.User = url.UserPassword(ru.User.Username(), "xxxxx") + } + return ru.String() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c35508e77a1..4fe4783cc6d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -838,13 +838,11 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-cleanhttp v0.5.2 ## explicit; go 1.13 github.com/hashicorp/go-cleanhttp -# github.com/hashicorp/go-hclog v1.6.2 -## explicit; go 1.13 # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-retryablehttp v0.7.5 -## explicit; go 1.13 +# github.com/hashicorp/go-retryablehttp v0.7.7 +## explicit; go 1.19 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-rootcerts v1.0.2 ## explicit; go 1.12 From 5a9a8236f54fdc3be69cecb548b4d2d72e204a8a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 13:48:15 +0200 Subject: [PATCH 07/37] chore(deps): update actions/upload-artifact digest to 0b2256b (#5950) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Tom Kerkhove Signed-off-by: novoselov --- .github/workflows/pr-e2e.yml | 2 +- .github/workflows/template-main-e2e-test.yml | 2 +- .github/workflows/template-smoke-tests.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 90963609289..680f1b247eb 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -254,7 +254,7 @@ jobs: details_url: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} - name: Upload test logs - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 with: name: e2e-test-logs path: "${{ github.workspace }}/**/*.log" diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml index 3b891785121..de0e71159d0 100644 --- a/.github/workflows/template-main-e2e-test.yml +++ b/.github/workflows/template-main-e2e-test.yml @@ -47,7 +47,7 @@ jobs: NODE_POOL_SIZE: 1 - name: Upload test logs - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 if: ${{ always() }} with: name: e2e-test-logs diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml index bfcb6b7cfaa..a550b224a76 100644 --- a/.github/workflows/template-smoke-tests.yml +++ b/.github/workflows/template-smoke-tests.yml @@ -44,7 +44,7 @@ jobs: run: make smoke-test - name: Upload test logs - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 if: ${{ always() }} with: name: smoke-test-logs ${{ inputs.runs-on }}-${{ inputs.kubernetesVersion }} From 10547f1c80811522320276d968b23aef8f5361e0 Mon Sep 17 00:00:00 2001 From: SpiritZhou Date: Wed, 10 Jul 2024 20:39:25 +0800 Subject: [PATCH 08/37] Provide CloudEvents around the management of ScaledObjects resources (#5953) Signed-off-by: novoselov --- CHANGELOG.md | 1 + apis/eventing/v1alpha1/cloudevent_types.go | 3 + cmd/operator/main.go | 1 - controllers/keda/scaledobject_controller.go | 20 ++-- 
controllers/keda/scaledobject_finalizer.go | 4 +-
 controllers/keda/suite_test.go | 1 -
 pkg/common/message/message.go | 2 +
 pkg/eventemitter/eventemitter.go | 6 +-
 .../cloudevent_source_test.go | 107 +++++++++++++++++-
 9 files changed, 127 insertions(+), 18 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fab886aea19..ea05a6a3b6e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -62,6 +62,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 - **General**: Add --ca-dir flag to KEDA operator to specify directories with CA certificates for scalers to authenticate TLS connections (defaults to /custom/ca) ([#5860](https://github.com/kedacore/keda/issues/5860))
 - **General**: Declarative parsing of scaler config ([#5037](https://github.com/kedacore/keda/issues/5037)|[#5797](https://github.com/kedacore/keda/issues/5797))
 - **General**: Introduce new Splunk Scaler ([#5904](https://github.com/kedacore/keda/issues/5904))
+- **General**: Provide CloudEvents around the management of ScaledObjects resources ([#3522](https://github.com/kedacore/keda/issues/3522))
 - **General**: Remove deprecated Kustomize commonLabels ([#5888](https://github.com/kedacore/keda/pull/5888))
 - **General**: Support for Kubernetes v1.30 ([#5828](https://github.com/kedacore/keda/issues/5828))

diff --git a/apis/eventing/v1alpha1/cloudevent_types.go b/apis/eventing/v1alpha1/cloudevent_types.go
index 89e14109882..fdab5229c96 100644
--- a/apis/eventing/v1alpha1/cloudevent_types.go
+++ b/apis/eventing/v1alpha1/cloudevent_types.go
@@ -26,6 +26,9 @@ const (

 	// ScaledObjectFailedType is for event when creating ScaledObject failed
 	ScaledObjectFailedType CloudEventType = "keda.scaledobject.failed.v1"
+
+	// ScaledObjectRemovedType is for event when a ScaledObject is removed
+	ScaledObjectRemovedType CloudEventType = "keda.scaledobject.removed.v1"
 )

 var AllEventTypes = []CloudEventType{ScaledObjectFailedType, ScaledObjectReadyType}
diff --git a/cmd/operator/main.go b/cmd/operator/main.go
index 88617aa8523..c9172cb5971 100644
--- a/cmd/operator/main.go
+++ b/cmd/operator/main.go
@@ -223,7 +223,6 @@ func main() {
 	if err = (&kedacontrollers.ScaledObjectReconciler{
 		Client: mgr.GetClient(),
 		Scheme: mgr.GetScheme(),
-		Recorder: eventRecorder,
 		ScaleClient: scaleClient,
 		ScaleHandler: scaledHandler,
 		EventEmitter: eventEmitter,
diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go
index 27625f6f10a..1032aae5b6d 100755
--- a/controllers/keda/scaledobject_controller.go
+++ b/controllers/keda/scaledobject_controller.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/tools/record"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -70,7 +69,6 @@ import (
 type ScaledObjectReconciler struct {
 	Client client.Client
 	Scheme *runtime.Scheme
-	Recorder record.EventRecorder
 	ScaleClient scale.ScalesGetter
 	ScaleHandler scaling.ScaleHandler
 	EventEmitter eventemitter.EventHandler
@@ -119,8 +117,8 @@ func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options cont
 	if r.Scheme == nil {
 		return fmt.Errorf("ScaledObjectReconciler.Scheme is not initialized")
 	}
-	if r.Recorder == nil {
-		return fmt.Errorf("ScaledObjectReconciler.Recorder is not initialized")
+	if r.EventEmitter == nil {
+		return fmt.Errorf("ScaledObjectReconciler.EventEmitter is not initialized")
 	}
 	// Start controller
 	return
ctrl.NewControllerManagedBy(mgr). @@ -184,7 +182,7 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request if !scaledObject.Status.Conditions.AreInitialized() { conditions := kedav1alpha1.GetInitializedConditions() if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, conditions); err != nil { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledObject, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) return ctrl.Result{}, err } } @@ -196,18 +194,18 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request reqLogger.Error(err, msg) conditions.SetReadyCondition(metav1.ConditionFalse, "ScaledObjectCheckFailed", msg) conditions.SetActiveCondition(metav1.ConditionUnknown, "UnknownState", "ScaledObject check failed") - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, msg) + r.EventEmitter.Emit(scaledObject, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, msg) } else { wasReady := conditions.GetReadyCondition() if wasReady.IsFalse() || wasReady.IsUnknown() { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeNormal, eventingv1alpha1.ScaledObjectReadyType, eventreason.ScaledObjectReady, message.ScalerReadyMsg) + r.EventEmitter.Emit(scaledObject, req.NamespacedName.Namespace, corev1.EventTypeNormal, eventingv1alpha1.ScaledObjectReadyType, eventreason.ScaledObjectReady, message.ScalerReadyMsg) } reqLogger.V(1).Info(msg) conditions.SetReadyCondition(metav1.ConditionTrue, kedav1alpha1.ScaledObjectConditionReadySuccessReason, msg) } if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, &conditions); err != nil { - r.EventEmitter.Emit(scaledObject, req.NamespacedName, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) + r.EventEmitter.Emit(scaledObject, req.NamespacedName.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) return ctrl.Result{}, err } @@ -359,7 +357,7 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte if err != nil { msg := "Failed to parse Group, Version, Kind, Resource" logger.Error(err, msg, "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind) - r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectUpdateFailed, msg) + r.EventEmitter.Emit(scaledObject, scaledObject.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectUpdateFailed, err.Error()) return gvkr, err } gvkString := gvkr.GVKString() @@ -396,12 +394,12 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte if err := r.Client.Get(ctx, client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil { // resource doesn't exist logger.Error(err, message.ScaleTargetNotFoundMsg, "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name) - r.Recorder.Event(scaledObject, 
corev1.EventTypeWarning, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg) + r.EventEmitter.Emit(scaledObject, scaledObject.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg) return gvkr, err } // resource exist but doesn't expose /scale subresource logger.Error(errScale, message.ScaleTargetNoSubresourceMsg, "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name) - r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg) + r.EventEmitter.Emit(scaledObject, scaledObject.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectFailedType, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg) return gvkr, errScale } isScalableCache.Store(gr.String(), true) diff --git a/controllers/keda/scaledobject_finalizer.go b/controllers/keda/scaledobject_finalizer.go index b3d48adbd77..70fe12e7d27 100644 --- a/controllers/keda/scaledobject_finalizer.go +++ b/controllers/keda/scaledobject_finalizer.go @@ -24,8 +24,10 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + eventingv1alpha1 "github.com/kedacore/keda/v2/apis/eventing/v1alpha1" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/controllers/keda/util" + "github.com/kedacore/keda/v2/pkg/common/message" "github.com/kedacore/keda/v2/pkg/eventreason" ) @@ -86,7 +88,7 @@ func (r *ScaledObjectReconciler) finalizeScaledObject(ctx context.Context, logge } logger.Info("Successfully finalized ScaledObject") - r.Recorder.Event(scaledObject, corev1.EventTypeNormal, eventreason.ScaledObjectDeleted, "ScaledObject was deleted") + r.EventEmitter.Emit(scaledObject, scaledObject.Namespace, corev1.EventTypeWarning, eventingv1alpha1.ScaledObjectRemovedType, eventreason.ScaledObjectDeleted, message.ScaledObjectRemoved) return nil } diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 54c318b8fdb..8659742f950 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -94,7 +94,6 @@ var _ = BeforeSuite(func() { err = (&ScaledObjectReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - Recorder: k8sManager.GetEventRecorderFor("keda-operator"), ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator"), nil), ScaleClient: scaleClient, EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), "kubernetes-default", nil), diff --git a/pkg/common/message/message.go b/pkg/common/message/message.go index a63d7fa69f3..490b77a6f0e 100644 --- a/pkg/common/message/message.go +++ b/pkg/common/message/message.go @@ -28,4 +28,6 @@ const ( ScaleTargetNotFoundMsg = "Target resource doesn't exist" ScaleTargetNoSubresourceMsg = "Target resource doesn't expose /scale subresource" + + ScaledObjectRemoved = "ScaledObject was deleted" ) diff --git a/pkg/eventemitter/eventemitter.go b/pkg/eventemitter/eventemitter.go index 959156efc1a..a46480ff537 100644 --- a/pkg/eventemitter/eventemitter.go +++ b/pkg/eventemitter/eventemitter.go @@ -73,7 +73,7 @@ type EventEmitter struct { type EventHandler interface { DeleteCloudEventSource(cloudEventSource *eventingv1alpha1.CloudEventSource) error HandleCloudEventSource(ctx context.Context, cloudEventSource 
*eventingv1alpha1.CloudEventSource) error
-	Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason string, message string)
+	Emit(object runtime.Object, namespace string, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason string, message string)
 }

 // EventDataHandler defines the behavior for different event handlers
@@ -325,7 +325,7 @@ func (e *EventEmitter) checkEventHandlers(ctx context.Context, cloudEventSource }

 // Emit emits an event to both local Kubernetes and the custom CloudEventSource handler. After the event is emitted to local Kubernetes, it is enqueued and waits for the handler to consume it.
-func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedName, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason, message string) {
+func (e *EventEmitter) Emit(object runtime.Object, namespace string, eventType string, cloudeventType eventingv1alpha1.CloudEventType, reason, message string) {
 	e.recorder.Event(object, eventType, reason, message)

 	e.eventHandlersCacheLock.RLock()
@@ -337,7 +337,7 @@ func (e *EventEmitter) Emit(object runtime.Object, namesapce types.NamespacedNam
 	objectName, _ := meta.NewAccessor().Name(object)
 	objectType, _ := meta.NewAccessor().Kind(object)
 	eventData := eventdata.EventData{
-		Namespace: namesapce.Namespace,
+		Namespace: namespace,
 		CloudEventType: cloudeventType,
 		ObjectName: strings.ToLower(objectName),
 		ObjectType: strings.ToLower(objectType),
diff --git a/tests/internals/cloudevent_source/cloudevent_source_test.go b/tests/internals/cloudevent_source/cloudevent_source_test.go
index e56579207a3..a5fe794c209 100644
--- a/tests/internals/cloudevent_source/cloudevent_source_test.go
+++ b/tests/internals/cloudevent_source/cloudevent_source_test.go
@@ -27,6 +27,7 @@ var _ = godotenv.Load("../../.env")
 var (
 	namespace = fmt.Sprintf("%s-ns", testName)
 	scaledObjectName = fmt.Sprintf("%s-so", testName)
+	deploymentName = fmt.Sprintf("%s-d", testName)
 	clientName = fmt.Sprintf("%s-client", testName)
 	cloudeventSourceName = fmt.Sprintf("%s-ce", testName)
 	cloudeventSourceErrName = fmt.Sprintf("%s-ce-err", testName)
@@ -43,6 +44,7 @@ var (
 type templateData struct {
 	TestNamespace string
 	ScaledObject string
+	DeploymentName string
 	ClientName string
 	CloudEventSourceName string
 	CloudeventSourceErrName string
@@ -210,6 +212,56 @@ spec:
     excludedEventTypes:
     - keda.scaledobject.failed.v1
 `
+
+	deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{.DeploymentName}}
+  namespace: {{.TestNamespace}}
+  labels:
+    deploy: {{.DeploymentName}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      pod: {{.DeploymentName}}
+  template:
+    metadata:
+      labels:
+        pod: {{.DeploymentName}}
+    spec:
+      containers:
+        - name: nginx
+          image: 'nginxinc/nginx-unprivileged'
+`
+
+	scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{.ScaledObject}}
+  namespace: {{.TestNamespace}}
+spec:
+  scaleTargetRef:
+    name: {{.DeploymentName}}
+  pollingInterval: 5
+  cooldownPeriod: 5
+  minReplicaCount: 1
+  maxReplicaCount: 10
+  advanced:
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleDown:
+          stabilizationWindowSeconds: 15
+  triggers:
+    - type: cron
+      metadata:
+        timezone: Etc/UTC
+        start: 3 * * * *
+        end: 5 * * * *
+        desiredReplicas: '4'
+`
 )

 func TestScaledObjectGeneral(t *testing.T) {
@@ -223,6 +275,7 @@ func TestScaledObjectGeneral(t *testing.T) {
 	assert.True(t, WaitForAllPodRunningInNamespace(t, kc, namespace, 5, 20), "all pods should be running")

 	testErrEventSourceEmitValue(t, kc, data)
+	testEventSourceEmitValue(t, kc, data)
 	testErrEventSourceExcludeValue(t, kc, data)
 	testErrEventSourceIncludeValue(t, kc, data)
 	testErrEventSourceCreation(t, kc, data)
@@ -258,8 +311,16 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem
 			foundEvents = append(foundEvents, cloudEvent)
 			data := map[string]string{}
 			err := cloudEvent.DataAs(&data)
+			t.Log("--- test emitting eventsource about scaledobject err---", "message", data["message"])
+
 			assert.NoError(t, err)
-			assert.Equal(t, data["message"], "ScaledObject doesn't have correct scaleTargetRef specification")
+			assert.Condition(t, func() bool {
+				return data["message"] == "ScaledObject doesn't have correct scaleTargetRef specification" ||
+					data["message"] == "Target resource doesn't exist"
+			}, "event message should match one of the expected failure messages")
+
 			assert.Equal(t, cloudEvent.Type(), "keda.scaledobject.failed.v1")
 			assert.Equal(t, cloudEvent.Source(), expectedSource)
 			assert.Equal(t, cloudEvent.DataContentType(), "application/json")
@@ -272,6 +333,49 @@ func testErrEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data tem
 	assert.NotEmpty(t, foundEvents)
 }

+func testEventSourceEmitValue(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+	t.Log("--- test emitting eventsource about scaledobject removed---")
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate)
+
+	// let the ScaledObject reconcile for a few seconds, then delete it and
+	// give the removal event time to propagate
+	time.Sleep(5 * time.Second)
+	KubectlDeleteWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+	time.Sleep(10 * time.Second)
+
+	out, outErr, err := ExecCommandOnSpecificPod(t, clientName, namespace, fmt.Sprintf("curl -X GET %s/getCloudEvent/%s", cloudEventHTTPServiceURL, "ScaledObjectDeleted"))
+	assert.NotEmpty(t, out)
+	assert.Empty(t, outErr)
+	assert.NoError(t, err, "don't expect error requesting the cloud events")
+
+	cloudEvents := []cloudevents.Event{}
+	err = json.Unmarshal([]byte(out), &cloudEvents)
+
+	assert.NoError(t, err, "don't expect error unmarshaling the cloudEvents")
+	assert.Greater(t, len(cloudEvents), 0, "cloudEvents should have at least 1 item")
+
+	foundEvents := []cloudevents.Event{}
+
+	for _, cloudEvent := range cloudEvents {
+		if cloudEvent.Subject() == expectedSubject {
+			foundEvents = append(foundEvents, cloudEvent)
+			data := map[string]string{}
+			err := cloudEvent.DataAs(&data)
+
+			assert.NoError(t, err)
+			assert.Equal(t, data["message"], "ScaledObject was deleted")
+			assert.Equal(t, cloudEvent.Type(), "keda.scaledobject.removed.v1")
+			assert.Equal(t, cloudEvent.Source(), expectedSource)
+			assert.Equal(t, cloudEvent.DataContentType(), "application/json")
+
+			if lastCloudEventTime.Before(cloudEvent.Time()) {
+				lastCloudEventTime = cloudEvent.Time()
+			}
+		}
+	}
+	assert.NotEmpty(t, foundEvents)
+}
+
 // tests error events not emitted by
 func testErrEventSourceExcludeValue(t *testing.T, _ *kubernetes.Clientset, data templateData) {
 	t.Log("--- test emitting eventsource about scaledobject err with exclude filter---")
@@ -362,6 +466,7 @@ func getTemplateData() (templateData, []Template) {
 	return templateData{
 		TestNamespace: namespace,
 		ScaledObject: scaledObjectName,
+		DeploymentName: deploymentName,
 		ClientName: clientName,
 		CloudEventSourceName: cloudeventSourceName,
 		CloudeventSourceErrName: cloudeventSourceErrName,

From 8db4090425cbada5b6df6a6f0dea8b64acafd25e Mon Sep 17 00:00:00 2001
From: novoselov Date: Thu, 1 Aug 2024 12:03:24 +0500 Subject: [PATCH 09/37] fix scaler and tests Signed-off-by: novoselov --- pkg/scalers/selenium_grid_scaler.go | 12 ++-- pkg/scalers/selenium_grid_scaler_test.go | 87 +++++++++++++++++++----- 2 files changed, 77 insertions(+), 22 deletions(-) diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go index dd57bd58331..3e035047edc 100644 --- a/pkg/scalers/selenium_grid_scaler.go +++ b/pkg/scalers/selenium_grid_scaler.go @@ -153,7 +153,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) { body, err := json.Marshal(map[string]string{ - "query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId, platformName } } }", + "query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }", }) if err != nil { @@ -200,9 +200,10 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s var capability = capability{} if err := json.Unmarshal([]byte(sessionQueueRequest), &capability); err == nil { if capability.BrowserName == browserName { - if strings.HasPrefix(capability.BrowserVersion, browserVersion) && strings.EqualFold(capability.PlatformName, platformName) { + var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName) + if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches { count++ - } else if len(strings.TrimSpace(capability.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && strings.EqualFold(capability.PlatformName, platformName) { + } else if len(strings.TrimSpace(capability.BrowserVersion)) == 0 && browserVersion == DefaultBrowserVersion && platformNameMatches { count++ } } @@ -215,10 +216,11 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s for _, session := range sessions { var capability = capability{} if err := json.Unmarshal([]byte(session.Capabilities), &capability); err == nil { + var platformNameMatches = capability.PlatformName == "" || strings.EqualFold(capability.PlatformName, platformName) if capability.BrowserName == sessionBrowserName { - if strings.HasPrefix(capability.BrowserVersion, browserVersion) && strings.EqualFold(capability.PlatformName, platformName) { + if strings.HasPrefix(capability.BrowserVersion, browserVersion) && platformNameMatches { count++ - } else if browserVersion == DefaultBrowserVersion && strings.EqualFold(capability.PlatformName, platformName) { + } else if browserVersion == DefaultBrowserVersion && platformNameMatches { count++ } } diff --git a/pkg/scalers/selenium_grid_scaler_test.go b/pkg/scalers/selenium_grid_scaler_test.go index 95e24743c1e..db846287b67 100644 --- a/pkg/scalers/selenium_grid_scaler_test.go +++ b/pkg/scalers/selenium_grid_scaler_test.go @@ -76,7 +76,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": 
\"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" } ] } @@ -150,12 +151,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b2", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d", + "platformName": "linux" } ] } @@ -184,12 +187,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n 
\"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b2", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d", + "platformName": "linux" } ] } @@ -218,12 +223,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + 
"platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b2", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d", + "platformName": "linux" } ] } @@ -252,12 +259,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b2", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n 
\"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d", + "platformName": "linux" } ] } @@ -286,12 +295,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b2", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983d", + "platformName": "linux" } ] } @@ -320,7 +331,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n 
\"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" } ] } @@ -334,6 +346,42 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { want: 2, wantErr: false, }, + { + name: "should scale on windows nodes when platformName specified in request", + args: args{ + b: []byte(`{ + "data": { + "grid":{ + "maxSession": 1, + "nodeCount": 1 + }, + "sessionsInfo": { + "sessionQueueRequests": ["{\n \"browserName\": \"chrome\"\n}","{\n \"browserName\": \"chrome\",\n \"platformName\": \"Windows 11\"\n}"], + "sessions": [ + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n,\n \"platformName\": \"Windows 11\"\n}", + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "Windows 11" + }, + { + "id": "0f9c5a941aa4d755a54b84be1f6535b1", + "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n,\n \"platformName\": \"Windows 11\"\n}", + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "Windows 11" + } + ] + } + } + }`), + browserName: "chrome", + sessionBrowserName: "chrome", + browserVersion: "91.0", + platformName: "Windows 11", + }, + want: 
2, + wantErr: false, + }, { name: "1 active msedge session with matching browsername/sessionBrowserName should return count as 3", args: args{ @@ -349,7 +397,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" } ] } @@ -378,7 +427,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" } ] } @@ -407,7 +457,8 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"msedge\",\n \"browserVersion\": \"91.0.4472.114\",\n \"msedge\": {\n \"msedgedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"ms:edgeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": 
\"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" } ] } @@ -482,12 +533,14 @@ func Test_getCountFromSeleniumResponse(t *testing.T) { { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"Windows 11\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "linux" }, { "id": "0f9c5a941aa4d755a54b84be1f6535b1", "capabilities": "{\n \"acceptInsecureCerts\": false,\n \"browserName\": \"chrome\",\n \"browserVersion\": \"91.0.4472.114\",\n \"chrome\": {\n \"chromedriverVersion\": \"91.0.4472.101 (af52a90bf87030dd1523486a1cd3ae25c5d76c9b-refs\\u002fbranch-heads\\u002f4472@{#1462})\",\n \"userDataDir\": \"\\u002ftmp\\u002f.com.google.Chrome.DMqx9m\"\n },\n \"goog:chromeOptions\": {\n \"debuggerAddress\": \"localhost:35839\"\n },\n \"networkConnectionEnabled\": false,\n \"pageLoadStrategy\": \"normal\",\n \"platformName\": \"linux\",\n \"proxy\": {\n },\n \"se:cdp\": \"http:\\u002f\\u002flocalhost:35839\",\n \"se:cdpVersion\": \"91.0.4472.114\",\n \"se:vncEnabled\": true,\n \"se:vncLocalAddress\": \"ws:\\u002f\\u002flocalhost:7900\\u002fwebsockify\",\n \"setWindowRect\": true,\n \"strictFileInteractability\": false,\n \"timeouts\": {\n \"implicit\": 0,\n \"pageLoad\": 300000,\n \"script\": 30000\n },\n \"unhandledPromptBehavior\": \"dismiss and notify\",\n \"webauthn:extension:largeBlob\": true,\n \"webauthn:virtualAuthenticators\": true\n}", - "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c" + "nodeId": "d44dcbc5-0b2c-4d5e-abf4-6f6aa5e0983c", + "platformName": "Windows 11" } ] } From 32966e119bc4b36e967b446cf2237f78bf86e149 Mon Sep 17 00:00:00 2001 From: Indresh2410 Date: Mon, 22 Jul 2024 18:06:46 +0530 Subject: [PATCH 10/37] Add username from env mysql scaler (#5952) * Add support to fetch username from env for MYSQL Scaler Signed-off-by: Indresh2410 * Add changelog Signed-off-by: Indresh2410 * Change if-else block to switch block Signed-off-by: Indresh2410 * Refactor scaler switch block Signed-off-by: Indresh2410 * Refactor method Signed-off-by: Indresh2410 * Refactor Mysql Scaler Signed-off-by: 
Indresh2410

* Address failure in static checks

Signed-off-by: Indresh2410

* Address Review Comments

Signed-off-by: Indresh2410

---------

Signed-off-by: Indresh2410
Signed-off-by: novoselov
---
 CHANGELOG.md | 1 +
 pkg/scalers/mysql_scaler.go | 123 +++++------------------
 pkg/scalers/mysql_scaler_test.go | 19 ++++-
 3 files changed, 45 insertions(+), 98 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ea05a6a3b6e..82f84f77272 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -78,6 +78,7 @@ Here is an overview of all new **experimental** features:
 - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778))
 - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738))
 - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689))
+- **MySQL Scaler**: Add support to fetch username from env ([#5883](https://github.com/kedacore/keda/issues/5883))

 ### Fixes

diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go
index 976295d0268..65de8cfb1ec 100644
--- a/pkg/scalers/mysql_scaler.go
+++ b/pkg/scalers/mysql_scaler.go
@@ -5,7 +5,6 @@ import (
 	"database/sql"
 	"fmt"
 	"net"
-	"strconv"
 	"strings"

 	"github.com/go-logr/logr"
@@ -25,16 +24,16 @@ type mySQLScaler struct {
 }

 type mySQLMetadata struct {
-	connectionString string // Database connection string
-	username string
-	password string
-	host string
-	port string
-	dbName string
-	query string
-	queryValue float64
-	activationQueryValue float64
-	metricName string
+	ConnectionString string `keda:"name=connectionString, order=authParams;resolvedEnv, optional"` // Database connection string
+	Username string `keda:"name=username, order=triggerMetadata;authParams;resolvedEnv, optional"`
+	Password string `keda:"name=password, order=authParams;resolvedEnv, optional"`
+	Host string `keda:"name=host, order=triggerMetadata;authParams, optional"`
+	Port string `keda:"name=port, order=triggerMetadata;authParams, optional"`
+	DBName string `keda:"name=dbName, order=triggerMetadata;authParams, optional"`
+	Query string `keda:"name=query, order=triggerMetadata"`
+	QueryValue float64 `keda:"name=queryValue, order=triggerMetadata"`
+	ActivationQueryValue float64 `keda:"name=activationQueryValue, order=triggerMetadata, default=0"`
+	MetricName string `keda:"name=metricName, order=triggerMetadata, optional"`
 }

 // NewMySQLScaler creates a new MySQL scaler
@@ -64,101 +63,33 @@ func NewMySQLScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {
 }

 func parseMySQLMetadata(config *scalersconfig.ScalerConfig) (*mySQLMetadata, error) {
-	meta := mySQLMetadata{}
+	meta := &mySQLMetadata{}

-	if val, ok := config.TriggerMetadata["query"]; ok {
-		meta.query = val
-	} else {
-		return nil, fmt.Errorf("no query given")
-	}
-
-	if val, ok := config.TriggerMetadata["queryValue"]; ok {
-		queryValue, err := strconv.ParseFloat(val, 64)
-		if err != nil {
-			return nil, fmt.Errorf("queryValue parsing error %w", err)
-		}
-		meta.queryValue = queryValue
-	} else {
-		if config.AsMetricSource {
-			meta.queryValue = 0
-		} else {
-			return nil, fmt.Errorf("no queryValue given")
-		}
-	}
-
-	meta.activationQueryValue = 0
-	if val, ok := config.TriggerMetadata["activationQueryValue"]; ok {
-		activationQueryValue, err := strconv.ParseFloat(val, 64)
-		if err != nil {
-			return nil, fmt.Errorf("activationQueryValue parsing error %w", err)
-		}
-		meta.activationQueryValue =
activationQueryValue - } - - switch { - case config.AuthParams["connectionString"] != "": - meta.connectionString = config.AuthParams["connectionString"] - case config.TriggerMetadata["connectionStringFromEnv"] != "": - meta.connectionString = config.ResolvedEnv[config.TriggerMetadata["connectionStringFromEnv"]] - default: - meta.connectionString = "" - var err error - host, err := GetFromAuthOrMeta(config, "host") - if err != nil { - return nil, err - } - meta.host = host - - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, err - } - meta.port = port - - username, err := GetFromAuthOrMeta(config, "username") - if err != nil { - return nil, err - } - meta.username = username - - dbName, err := GetFromAuthOrMeta(config, "dbName") - if err != nil { - return nil, err - } - meta.dbName = dbName - - if config.AuthParams["password"] != "" { - meta.password = config.AuthParams["password"] - } else if config.TriggerMetadata["passwordFromEnv"] != "" { - meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - } - - if len(meta.password) == 0 { - return nil, fmt.Errorf("no password given") - } + if err := config.TypedConfig(meta); err != nil { + return nil, fmt.Errorf("error parsing mysql metadata: %w", err) } - if meta.connectionString != "" { - meta.dbName = parseMySQLDbNameFromConnectionStr(meta.connectionString) + if meta.ConnectionString != "" { + meta.DBName = parseMySQLDbNameFromConnectionStr(meta.ConnectionString) } - meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("mysql-%s", meta.dbName))) + meta.MetricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("mysql-%s", meta.DBName))) - return &meta, nil + return meta, nil } // metadataToConnectionStr builds new MySQL connection string func metadataToConnectionStr(meta *mySQLMetadata) string { var connStr string - if meta.connectionString != "" { - connStr = meta.connectionString + if meta.ConnectionString != "" { + connStr = meta.ConnectionString } else { // Build connection str config := mysql.NewConfig() - config.Addr = net.JoinHostPort(meta.host, meta.port) - config.DBName = meta.dbName - config.Passwd = meta.password - config.User = meta.username + config.Addr = net.JoinHostPort(meta.Host, meta.Port) + config.DBName = meta.DBName + config.Passwd = meta.Password + config.User = meta.Username config.Net = "tcp" connStr = config.FormatDSN() } @@ -205,7 +136,7 @@ func (s *mySQLScaler) Close(context.Context) error { // getQueryResult returns result of the scaler query func (s *mySQLScaler) getQueryResult(ctx context.Context) (float64, error) { var value float64 - err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&value) + err := s.connection.QueryRowContext(ctx, s.metadata.Query).Scan(&value) if err != nil { s.logger.Error(err, fmt.Sprintf("Could not query MySQL database: %s", err)) return 0, err @@ -217,9 +148,9 @@ func (s *mySQLScaler) getQueryResult(ctx context.Context) (float64, error) { func (s *mySQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { externalMetric := &v2.ExternalMetricSource{ Metric: v2.MetricIdentifier{ - Name: s.metadata.metricName, + Name: s.metadata.MetricName, }, - Target: GetMetricTargetMili(s.metricType, s.metadata.queryValue), + Target: GetMetricTargetMili(s.metricType, s.metadata.QueryValue), } metricSpec := v2.MetricSpec{ External: externalMetric, Type: externalMetricType, @@ -236,5 +167,5 @@ func (s *mySQLScaler) 
@@ -236,5 +167,5 @@ func (s *mySQLScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
 	metric := GenerateMetricInMili(metricName, num)

-	return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil
+	return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.ActivationQueryValue, nil
 }

diff --git a/pkg/scalers/mysql_scaler_test.go b/pkg/scalers/mysql_scaler_test.go
index e2b514a4b2a..70ed8c71d15 100644
--- a/pkg/scalers/mysql_scaler_test.go
+++ b/pkg/scalers/mysql_scaler_test.go
@@ -7,6 +7,7 @@ import (
 )

 var testMySQLResolvedEnv = map[string]string{
+	"MYSQL_USERNAME": "test_username",
 	"MYSQL_PASSWORD": "pass",
 	"MYSQL_CONN_STR": "user@tcp(http://my.mysql.dev:3306)/stats_db",
 }
@@ -46,6 +47,13 @@ var testMySQLMetadata = []parseMySQLMetadataTestData{
 		resolvedEnv: testMySQLResolvedEnv,
 		raisesError: false,
 	},
+	// Params instead of conn str with userFromEnv
+	{
+		metadata:    map[string]string{"query": "query", "queryValue": "12", "host": "test_host", "port": "test_port", "usernameFromEnv": "MYSQL_USERNAME", "passwordFromEnv": "MYSQL_PASSWORD", "dbName": "test_dbname"},
+		authParams:  map[string]string{},
+		resolvedEnv: testMySQLResolvedEnv,
+		raisesError: false,
+	},
 	// Params from trigger authentication
 	{
 		metadata: map[string]string{"query": "query", "queryValue": "12"},
@@ -60,6 +68,13 @@ var testMySQLMetadata = []parseMySQLMetadataTestData{
 		resolvedEnv: testMySQLResolvedEnv,
 		raisesError: true,
 	},
+	// No username provided in authParams, metadata, resolvedEnv
+	{
+		metadata:    map[string]string{"query": "query", "queryValue": "12", "activationQueryValue": "AA"},
+		authParams:  map[string]string{"host": "test_host", "port": "test_port", "password": "MYSQL_PASSWORD", "dbName": "test_dbname"},
+		resolvedEnv: map[string]string{},
+		raisesError: true,
+	},
 }

 var mySQLMetricIdentifiers = []mySQLMetricIdentifier{
@@ -106,8 +121,8 @@ func TestMySQLGetMetricSpecForScaling(t *testing.T) {
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
-		if meta.metricName != testData.metricName {
-			t.Error("Wrong External metric source name:", meta.metricName)
+		if meta.MetricName != testData.metricName {
+			t.Error("Wrong External metric source name:", meta.MetricName)
 		}
 	}
 }

From 8412f6b5c0fa37de962e225d2584fd64eb599006 Mon Sep 17 00:00:00 2001
From: Semih Buyukgungor
Date: Mon, 22 Jul 2024 16:34:13 +0300
Subject: [PATCH 11/37] Return the error if TLS config fails in Rabbitmq scaler
 (#5964)

Signed-off-by: semihbkgr
Signed-off-by: novoselov
---
 pkg/scalers/rabbitmq_scaler.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go
index 6037aa45944..be2ac66995f 100644
--- a/pkg/scalers/rabbitmq_scaler.go
+++ b/pkg/scalers/rabbitmq_scaler.go
@@ -454,9 +454,10 @@ func getConnectionAndChannel(host string, meta *rabbitMQMetadata) (*amqp.Connection, *amqp.Channel, error) {
 	var err error
 	if meta.enableTLS {
 		tlsConfig, configErr := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl)
-		if configErr == nil {
-			conn, err = amqp.DialTLS(host, tlsConfig)
+		if configErr != nil {
+			return nil, nil, configErr
 		}
+		conn, err = amqp.DialTLS(host, tlsConfig)
 	} else {
 		conn, err = amqp.Dial(host)
 	}
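The fix above matters because the old control flow converted a TLS configuration failure into a silent no-op: `configErr` was never returned, and callers later failed on a nil connection with a misleading error. A self-contained sketch of the before and after behavior, with hypothetical names standing in for the KEDA helpers:

package main

import (
	"crypto/tls"
	"errors"
	"fmt"
)

// buildTLS stands in for kedautil.NewTLSConfigWithPassword; it always fails
// here so the two control flows can be compared.
func buildTLS() (*tls.Config, error) {
	return nil, errors.New("invalid client certificate")
}

// before mirrors the old branch: the dial is skipped, but the config error
// is dropped and the caller sees success with no connection.
func before() error {
	var err error
	tlsConfig, configErr := buildTLS()
	if configErr == nil {
		err = fmt.Errorf("would dial with %v", tlsConfig)
	}
	return err
}

// after mirrors the patched branch: the config error is surfaced at once.
func after() error {
	tlsConfig, configErr := buildTLS()
	if configErr != nil {
		return configErr
	}
	_ = tlsConfig // the dial would happen here
	return nil
}

func main() {
	fmt.Println("before:", before()) // before: <nil>  (error swallowed)
	fmt.Println("after:", after())   // after: invalid client certificate
}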
From 6697ce140ba8d8011d77d91a3309c7554875c92d Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 22 Jul 2024 21:51:34 +0200
Subject: [PATCH 12/37] chore(deps): update github actions (#5957)

| datasource  | package              | from   | to     |
| ----------- | -------------------- | ------ | ------ |
| github-tags | actions/setup-python | v5.1.0 | v5.1.1 |

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Signed-off-by: novoselov
---
 .github/workflows/fossa.yml                | 2 +-
 .github/workflows/pr-validation.yml        | 4 ++--
 .github/workflows/template-smoke-tests.yml | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml
index 09fd0384c7d..a7db0620dbd 100644
--- a/.github/workflows/fossa.yml
+++ b/.github/workflows/fossa.yml
@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
-      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5
+      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5
         with:
           go-version: "1.21"
       - run: go version
diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml
index bcd34e3984b..b664b7b8b9b 100644
--- a/.github/workflows/pr-validation.yml
+++ b/.github/workflows/pr-validation.yml
@@ -138,10 +138,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
-      - uses: actions/setup-python@82c7e631bb3cdc910f68e0081d67478d79c6982d # v5.1.0
+      - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1
         with:
           python-version: 3.x
-      - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5
+      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5
         with:
           go-version: "1.21"
       - name: Get golangci
diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml
index a550b224a76..582d0a14f0d 100644
--- a/.github/workflows/template-smoke-tests.yml
+++ b/.github/workflows/template-smoke-tests.yml
@@ -18,7 +18,7 @@ jobs:
   runs-on: ${{ inputs.runs-on }}
   steps:
     - name: Setup Go
-      uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5
+      uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5
       with:
         go-version: "1.21"

From 1e01186f5533124e0013ac9885d3ac67f251f12d Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Wed, 24 Jul 2024 17:01:12 +0200
Subject: [PATCH 13/37] Migrate old Azure SDKs to use
 `github.com/Azure/azure-sdk-for-go/sdk/azidentity` (#5471)

* Migrate EventHub SDK
Signed-off-by: Jorge Turrado
* remove old sdk
Signed-off-by: Jorge Turrado
* sort
Signed-off-by: Jorge Turrado
* migrate storage SDK
Signed-off-by: Jorge Turrado
* update changelog and style
Signed-off-by: Jorge Turrado
* migrate key vault
Signed-off-by: Jorge Turrado
* fix typos
Signed-off-by: Jorge Turrado
* Update azure monitor
Signed-off-by: Jorge Turrado
* fix typo
Signed-off-by: Jorge Turrado
* fix e2e tests
Signed-off-by: Jorge Turrado
* fix error
Signed-off-by: Jorge Turrado
* use AZQUERY FOR MONITOR
Signed-off-by: Jorge Turrado
* fix style
Signed-off-by: Jorge Turrado
* use client options
Signed-off-by: Jorge Turrado
* use sdk parsing function
Signed-off-by: Jorge Turrado
* migrate log analytics
Signed-off-by: Jorge Turrado
* update tokens
Signed-off-by: Jorge Turrado
* style
Signed-off-by: Jorge Turrado
* fix typo
Signed-off-by: Jorge Turrado
* Add missing changes
Signed-off-by: Jorge Turrado
* bump azure deps
Signed-off-by: Jorge Turrado
* fix packages
Signed-off-by: Jorge Turrado
* fix code
Signed-off-by: Jorge Turrado
* fix test
Signed-off-by: Jorge Turrado
* fix styles
Signed-off-by: Jorge Turrado
* remove todos
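The common thread of the bullets above is that the Azure scalers now take token credentials from `azidentity` instead of package-specific auth types. A minimal sketch of that shared flow against the new `azblob` client; the account URL is a placeholder, and the code assumes ambient credentials (environment variables, workload identity, managed identity, or the Azure CLI) are available:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// DefaultAzureCredential chains the same sources the migrated scalers
	// rely on: environment, workload identity, managed identity, Azure CLI.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// Placeholder account URL; one credential value now serves blob, queue,
	// Event Hubs, Key Vault, and monitor clients alike.
	client, err := azblob.NewClient("https://myaccount.blob.core.windows.net/", cred, nil)
	if err != nil {
		panic(err)
	}
	pager := client.NewListContainersPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			panic(err)
		}
		for _, item := range page.ContainerItems {
			fmt.Println(*item.Name)
		}
	}
}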
Signed-off-by: Jorge Turrado --------- Signed-off-by: Jorge Turrado Signed-off-by: novoselov --- .golangci.yml | 4 - CHANGELOG.md | 1 + Makefile | 10 +- go.mod | 25 +- go.sum | 78 +- .../azure_event_grid_topic_handler.go | 2 +- pkg/scalers/azure/azure_azidentity_chain.go | 8 +- pkg/scalers/azure/azure_blob.go | 61 +- pkg/scalers/azure/azure_blob_test.go | 39 +- pkg/scalers/azure/azure_cloud_environment.go | 7 + pkg/scalers/azure/azure_data_explorer.go | 2 +- pkg/scalers/azure/azure_eventhub.go | 37 +- .../azure/azure_eventhub_checkpoint.go | 149 +- pkg/scalers/azure/azure_eventhub_test.go | 271 +- ...e_managed_prometheus_http_round_tripper.go | 17 +- pkg/scalers/azure/azure_monitor.go | 245 - pkg/scalers/azure/azure_monitor_test.go | 47 - pkg/scalers/azure/azure_queue.go | 44 - pkg/scalers/azure/azure_queue_test.go | 40 - pkg/scalers/azure/azure_storage.go | 181 +- pkg/scalers/azure/azure_storage_test.go | 61 - pkg/scalers/azure_blob_scaler.go | 14 +- .../azure_data_explorer_scaler_test.go | 15 +- pkg/scalers/azure_eventhub_scaler.go | 89 +- pkg/scalers/azure_eventhub_scaler_test.go | 200 +- pkg/scalers/azure_log_analytics_scaler.go | 478 +- .../azure_log_analytics_scaler_test.go | 16 +- pkg/scalers/azure_monitor_scaler.go | 232 +- pkg/scalers/azure_monitor_scaler_test.go | 10 +- pkg/scalers/azure_pipelines_scaler.go | 2 +- pkg/scalers/azure_queue_scaler.go | 29 +- pkg/scalers/azure_queue_scaler_test.go | 7 +- pkg/scalers/azure_servicebus_scaler.go | 16 +- .../resolver/azure_keyvault_handler.go | 64 +- .../resolver/azure_keyvault_handler_test.go | 190 +- pkg/scaling/scalers_builder.go | 2 +- .../azure/azure_blob/azure_blob_test.go | 67 +- .../azure_blob_aad_wi_test.go | 71 +- .../azure_event_hub_aad_wi_test.go | 132 +- .../azure_event_hub_blob_metadata_test.go | 115 +- .../azure_event_hub_dapr_test.go | 121 +- .../azure_event_hub_go_sdk_test.go | 115 +- .../azure/azure_queue/azure_queue_test.go | 62 +- .../azure_queue_aad_wi_test.go | 70 +- tests/scalers/azure/helper/EventHubHelper.go | 133 + tests/scalers/azure/helper/StorageHelper.go | 26 + .../azure_keyvault/azure_keyvault_test.go | 54 +- .../azure_keyvault_workload_identity_test.go | 54 +- .../trigger_auth_secret_test.go | 52 +- .../Azure/azure-amqp-common-go/v4/.gitignore | 19 - .../Azure/azure-amqp-common-go/v4/Makefile | 97 - .../Azure/azure-amqp-common-go/v4/README.md | 49 - .../Azure/azure-amqp-common-go/v4/SECURITY.md | 41 - .../Azure/azure-amqp-common-go/v4/aad/jwt.go | 253 - .../v4/azure-pipelines.yml | 96 - .../Azure/azure-amqp-common-go/v4/cbs/cbs.go | 90 - .../azure-amqp-common-go/v4/changelog.md | 122 - .../azure-amqp-common-go/v4/conn/conn.go | 112 - .../v4/internal/tracing/tracing.go | 32 - .../v4/internal/version.go | 6 - .../Azure/azure-amqp-common-go/v4/ptrs.go | 44 - .../Azure/azure-amqp-common-go/v4/retry.go | 54 - .../Azure/azure-amqp-common-go/v4/rpc/rpc.go | 506 -- .../Azure/azure-amqp-common-go/v4/sas/sas.go | 158 - .../azure-amqp-common-go/v4/uuid/uuid.go | 72 - .../Azure/azure-event-hubs-go/v3/.gitignore | 29 - .../Azure/azure-event-hubs-go/v3/Makefile | 94 - .../Azure/azure-event-hubs-go/v3/SECURITY.md | 41 - .../Azure/azure-event-hubs-go/v3/amqp_mgmt.go | 188 - .../Azure/azure-event-hubs-go/v3/atom/atom.go | 54 - .../azure-event-hubs-go/v3/azuredeploy.tf | 164 - .../Azure/azure-event-hubs-go/v3/batch.go | 221 - .../Azure/azure-event-hubs-go/v3/changelog.md | 245 - .../Azure/azure-event-hubs-go/v3/errors.go | 11 - .../Azure/azure-event-hubs-go/v3/event.go | 365 - .../Azure/azure-event-hubs-go/v3/http_mgmt.go | 
183 - .../Azure/azure-event-hubs-go/v3/hub.go | 792 -- .../Azure/azure-event-hubs-go/v3/namespace.go | 143 - .../v3/persist/checkpoint.go | 69 - .../azure-event-hubs-go/v3/persist/file.go | 100 - .../azure-event-hubs-go/v3/persist/persist.go | 58 - .../Azure/azure-event-hubs-go/v3/readme.md | 474 -- .../Azure/azure-event-hubs-go/v3/receiver.go | 498 -- .../Azure/azure-event-hubs-go/v3/sender.go | 395 - .../Azure/azure-event-hubs-go/v3/session.go | 53 - .../Azure/azure-event-hubs-go/v3/tracing.go | 77 - .../Azure/azure-event-hubs-go/v3/version.go | 6 - .../Azure/azure-pipeline-go/pipeline/core.go | 284 - .../azure-pipeline-go/pipeline/defaultlog.go | 14 - .../pipeline/defaultlog_syslog.go | 33 - .../pipeline/defaultlog_windows.go | 61 - .../Azure/azure-pipeline-go/pipeline/doc.go | 161 - .../Azure/azure-pipeline-go/pipeline/error.go | 184 - .../azure-pipeline-go/pipeline/progress.go | 82 - .../azure-pipeline-go/pipeline/request.go | 147 - .../azure-pipeline-go/pipeline/response.go | 74 - .../azure-pipeline-go/pipeline/version.go | 9 - .../Azure/azure-sdk-for-go/NOTICE.txt | 29 - .../azure-sdk-for-go/sdk/azcore/arm/client.go | 72 + .../azure-sdk-for-go/sdk/azcore/arm/doc.go | 9 + .../sdk/azcore/arm/resource_identifier.go | 23 + .../sdk/azcore/arm/resource_type.go | 40 + .../messaging/azeventgrid/internal/version.go | 2 +- .../messaging/azeventgrid/publisher/README.md | 2 + .../sdk/messaging/azeventhubs/CHANGELOG.md | 177 + .../sdk/messaging/azeventhubs/LICENSE.txt} | 8 +- .../sdk/messaging/azeventhubs/README.md | 133 + .../sdk/messaging/azeventhubs/amqp_message.go | 271 + .../messaging/azeventhubs/checkpoint_store.go | 70 + .../sdk/messaging/azeventhubs/ci.yml | 35 + .../connection_string_properties.go | 21 + .../messaging/azeventhubs/consumer_client.go | 262 + .../sdk/messaging/azeventhubs/doc.go | 15 + .../sdk/messaging/azeventhubs/error.go | 31 + .../sdk/messaging/azeventhubs/event_data.go | 195 + .../messaging/azeventhubs/event_data_batch.go | 236 + .../azeventhubs/internal/amqpInterfaces.go | 21 + .../azeventhubs/internal/amqp_fakes.go | 149 + .../azeventhubs/internal/amqpwrap/amqpwrap.go | 307 + .../azeventhubs/internal/amqpwrap/error.go | 42 + .../azeventhubs/internal/amqpwrap/rpc.go | 27 + .../azeventhubs/internal/auth/token.go | 39 + .../sdk/messaging/azeventhubs/internal/cbs.go | 78 + .../azeventhubs/internal/constants.go | 7 + .../azeventhubs/internal/eh/eh_internal.go | 21 + .../messaging/azeventhubs/internal/errors.go | 265 + .../exported/connection_string_properties.go | 129 + .../azeventhubs/internal/exported/error.go | 58 + .../internal/exported/log_events.go | 23 + .../internal/exported/retry_options.go | 26 + .../exported/websocket_conn_params.go | 13 + .../messaging/azeventhubs/internal/links.go | 395 + .../azeventhubs/internal/links_recover.go | 155 + .../azeventhubs/internal/namespace.go | 512 ++ .../azeventhubs/internal/namespace_eh.go | 48 + .../sdk/messaging/azeventhubs/internal/rpc.go | 444 + .../messaging/azeventhubs/internal/sas/sas.go | 179 + .../internal/sbauth/token_provider.go | 138 + .../azeventhubs/internal/utils/retrier.go | 138 + .../sdk/messaging/azeventhubs/log.go | 23 + .../sdk/messaging/azeventhubs/mgmt.go | 253 + .../messaging/azeventhubs/migrationguide.md | 106 + .../messaging/azeventhubs/partition_client.go | 380 + .../sdk/messaging/azeventhubs/processor.go | 515 ++ .../azeventhubs/processor_load_balancer.go | 302 + .../azeventhubs/processor_partition_client.go | 73 + .../messaging/azeventhubs/producer_client.go | 312 + 
.../sdk/messaging/azeventhubs/sample.env | 20 + .../azeventhubs/test-resources.bicep | 225 + .../sdk/messaging/azservicebus/CHANGELOG.md | 6 + .../azservicebus/internal/conn/conn.go | 7 +- .../azservicebus/internal/constants.go | 2 +- .../sdk/monitor/azquery/CHANGELOG.md | 64 + .../{ => sdk/monitor/azquery}/LICENSE.txt | 6 +- .../sdk/monitor/azquery/MIGRATION.md | 177 + .../sdk/monitor/azquery/README.md | 151 + .../sdk/monitor/azquery/TROUBLESHOOTING.md | 177 + .../sdk/monitor/azquery/assets.json | 6 + .../sdk/monitor/azquery/autorest.md | 206 + .../sdk/monitor/azquery/build.go | 10 + .../sdk/monitor/azquery/ci.yml | 35 + .../sdk/monitor/azquery/cloud_config.go | 42 + .../sdk/monitor/azquery/constants.go | 188 + .../sdk/monitor/azquery/custom_client.go | 196 + .../sdk/monitor/azquery/logs_client.go | 168 + .../sdk/monitor/azquery/metrics_client.go | 209 + .../sdk/monitor/azquery/models.go | 371 + .../sdk/monitor/azquery/models_serde.go | 835 ++ .../sdk/monitor/azquery/response_types.go | 40 + .../sdk/monitor/azquery/test-resources.bicep | 50 + .../sdk/monitor/azquery/time_rfc3339.go | 87 + .../sdk/monitor/azquery/version.go | 12 + .../eventhub/armeventhub/CHANGELOG.md | 75 + .../eventhub/armeventhub/LICENSE.txt} | 4 +- .../eventhub/armeventhub/README.md | 99 + .../eventhub/armeventhub/assets.json | 6 + .../eventhub/armeventhub/autorest.md | 13 + .../eventhub/armeventhub/build.go | 7 + .../eventhub/armeventhub/ci.yml | 28 + .../eventhub/armeventhub/client_factory.go | 99 + .../eventhub/armeventhub/clusters_client.go | 577 ++ .../armeventhub/configuration_client.go | 179 + .../eventhub/armeventhub/constants.go | 367 + .../armeventhub/consumergroups_client.go | 340 + .../disasterrecoveryconfigs_client.go | 723 ++ .../eventhub/armeventhub/eventhubs_client.go | 762 ++ .../eventhub/armeventhub/models.go | 902 ++ .../eventhub/armeventhub/models_serde.go | 2442 ++++++ .../eventhub/armeventhub/namespaces_client.go | 1119 +++ .../eventhub/armeventhub/operations_client.go | 88 + .../eventhub/armeventhub/options.go | 367 + .../privateendpointconnections_client.go | 335 + .../privatelinkresources_client.go | 110 + .../eventhub/armeventhub/response_types.go | 359 + .../armeventhub/schemaregistry_client.go | 319 + .../eventhub/armeventhub/time_rfc3339.go | 86 + .../security/keyvault/azsecrets/CHANGELOG.md | 154 + .../security/keyvault/azsecrets/LICENSE.txt} | 0 .../sdk/security/keyvault/azsecrets/README.md | 144 + .../keyvault/azsecrets/TROUBLESHOOTING.md | 4 + .../security/keyvault/azsecrets/assets.json | 6 + .../security/keyvault/azsecrets/autorest.md | 141 + .../sdk/security/keyvault/azsecrets/build.go | 11 + .../sdk/security/keyvault/azsecrets/ci.yml | 30 + .../sdk/security/keyvault/azsecrets/client.go | 677 ++ .../keyvault/azsecrets/custom_client.go | 68 + .../sdk/security/keyvault/azsecrets/models.go | 204 + .../keyvault/azsecrets/models_serde.go | 500 ++ .../security/keyvault/azsecrets/options.go | 71 + .../keyvault/azsecrets/response_types.go | 80 + .../keyvault/azsecrets/test-resources.json | 331 + .../security/keyvault/azsecrets/time_unix.go | 61 + .../security/keyvault/azsecrets/version.go | 12 + .../security/keyvault/internal/CHANGELOG.md | 70 + .../security/keyvault/internal/LICENSE.txt} | 0 .../sdk/security/keyvault/internal/README.md | 21 + .../keyvault/internal/challenge_policy.go | 175 + .../keyvault/internal/ci.securitykeyvault.yml | 28 + .../security/keyvault/internal/constants.go} | 10 +- .../sdk/security/keyvault/internal/doc.go | 7 + .../sdk/security/keyvault/internal/parse.go | 37 
+ .../sdk/storage/azblob/CHANGELOG.md | 285 + .../sdk/storage/azblob/LICENSE.txt} | 0 .../sdk/storage/azblob/README.md | 282 + .../sdk/storage/azblob/appendblob/client.go | 366 + .../sdk/storage/azblob/appendblob/models.go | 180 + .../storage/azblob/appendblob/responses.go | 26 + .../sdk/storage/azblob/assets.json | 6 + .../sdk/storage/azblob/blob/client.go | 471 ++ .../sdk/storage/azblob/blob/constants.go | 235 + .../sdk/storage/azblob/blob/models.go | 580 ++ .../sdk/storage/azblob/blob/responses.go | 119 + .../sdk/storage/azblob/blob/retry_reader.go} | 148 +- .../sdk/storage/azblob/blob/utils.go | 79 + .../storage/azblob/bloberror/error_codes.go | 159 + .../storage/azblob/blockblob/chunkwriting.go | 249 + .../sdk/storage/azblob/blockblob/client.go | 597 ++ .../sdk/storage/azblob/blockblob/constants.go | 52 + .../sdk/storage/azblob/blockblob/models.go | 411 + .../sdk/storage/azblob/blockblob/responses.go | 117 + .../sdk/storage/azblob/ci.yml | 34 + .../sdk/storage/azblob/client.go | 164 + .../sdk/storage/azblob/common.go | 36 + .../sdk/storage/azblob/constants.go | 37 + .../storage/azblob/container/batch_builder.go | 94 + .../sdk/storage/azblob/container/client.go | 437 + .../sdk/storage/azblob/container/constants.go | 150 + .../sdk/storage/azblob/container/models.go | 427 + .../sdk/storage/azblob/container/responses.go | 69 + .../sdk/storage/azblob/doc.go | 210 + .../storage/azblob/internal/base/clients.go | 129 + .../internal/exported/access_conditions.go | 43 + .../azblob/internal/exported/access_policy.go | 67 + .../azblob/internal/exported/blob_batch.go | 280 + .../azblob/internal/exported/exported.go | 33 + .../azblob/internal/exported/log_events.go | 20 + .../azblob/internal/exported/set_expiry.go | 71 + .../exported/shared_key_credential.go | 225 + .../exported/transfer_validation_option.go | 67 + .../exported/user_delegation_credential.go | 64 + .../azblob/internal/exported/version.go | 12 + .../internal/generated/appendblob_client.go | 32 + .../azblob/internal/generated/autorest.md | 475 ++ .../azblob/internal/generated/blob_client.go | 44 + .../internal/generated/block_blob_client.go | 32 + .../azblob/internal/generated/build.go | 10 + .../azblob/internal/generated/constants.go | 9 + .../internal/generated/container_client.go | 30 + .../azblob/internal/generated/models.go | 141 + .../internal/generated/pageblob_client.go | 30 + .../internal/generated/service_client.go | 30 + .../generated/zz_appendblob_client.go | 662 ++ .../internal/generated/zz_blob_client.go | 2962 +++++++ .../internal/generated/zz_blockblob_client.go | 993 +++ .../azblob/internal/generated/zz_constants.go | 747 ++ .../internal/generated/zz_container_client.go | 1591 ++++ .../azblob/internal/generated/zz_models.go | 544 ++ .../internal/generated/zz_models_serde.go | 472 ++ .../azblob/internal/generated/zz_options.go | 1469 ++++ .../internal/generated/zz_pageblob_client.go | 1295 +++ .../internal/generated/zz_response_types.go | 2016 +++++ .../internal/generated/zz_service_client.go | 580 ++ .../internal/generated/zz_time_rfc1123.go | 42 + .../internal/generated/zz_time_rfc3339.go | 58 + .../internal/generated/zz_xml_helper.go | 53 + .../azblob/internal/shared/batch_transfer.go | 81 + .../azblob/internal/shared/buffer_manager.go | 70 + .../azblob/internal/shared/bytes_writer.go | 30 + .../internal/shared/challenge_policy.go | 115 + .../azblob/internal/shared/mmf_unix.go | 38 + .../azblob/internal/shared/mmf_windows.go | 56 + .../azblob/internal/shared/section_writer.go | 53 + 
.../storage/azblob/internal/shared/shared.go | 271 + .../sdk/storage/azblob/log.go | 16 + .../sdk/storage/azblob/migrationguide.md | 76 + .../sdk/storage/azblob/models.go | 69 + .../sdk/storage/azblob/pageblob/client.go | 454 + .../sdk/storage/azblob/pageblob/constants.go | 65 + .../sdk/storage/azblob/pageblob/models.go | 330 + .../sdk/storage/azblob/pageblob/responses.go | 38 + .../sdk/storage/azblob/responses.go | 51 + .../sdk/storage/azblob/sas/account.go | 229 + .../sdk/storage/azblob/sas/query_params.go | 452 + .../sdk/storage/azblob/sas/service.go | 472 ++ .../sdk/storage/azblob/sas/url_parts.go} | 133 +- .../storage/azblob/service/batch_builder.go | 94 + .../sdk/storage/azblob/service/client.go | 377 + .../sdk/storage/azblob/service/constants.go | 92 + .../sdk/storage/azblob/service/models.go | 361 + .../sdk/storage/azblob/service/responses.go | 63 + .../sdk/storage/azblob/test-resources.json | 579 ++ .../sdk/storage/azqueue/CHANGELOG.md | 14 + .../sdk/storage/azqueue/LICENSE.txt} | 2 +- .../sdk/storage/azqueue/README.md | 241 + .../sdk/storage/azqueue/assets.json | 6 + .../sdk/storage/azqueue/ci.yml | 28 + .../sdk/storage/azqueue/constants.go | 18 + .../sdk/storage/azqueue/doc.go | 168 + .../storage/azqueue/internal/base/clients.go | 59 + .../internal/exported/access_policy.go | 57 + .../exported/shared_key_credential.go | 218 + .../azqueue/internal/exported/version.go | 12 + .../azqueue/internal/generated/autorest.md | 150 + .../azqueue/internal/generated/build.go | 10 + .../internal/generated/queue_client.go | 17 + .../internal/generated/service_client.go | 17 + .../internal/generated/zz_constants.go | 154 + .../internal/generated/zz_messageid_client.go | 176 + .../internal/generated/zz_messages_client.go | 299 + .../azqueue/internal/generated/zz_models.go | 430 + .../internal/generated/zz_models_serde.go | 281 + .../internal/generated/zz_queue_client.go | 432 + .../internal/generated/zz_response_types.go | 225 + .../internal/generated/zz_service_client.go | 276 + .../internal/generated/zz_time_rfc1123.go | 43 + .../internal/generated/zz_time_rfc3339.go | 59 + .../internal/generated/zz_xml_helper.go | 41 + .../storage/azqueue/internal/shared/shared.go | 146 + .../sdk/storage/azqueue/models.go | 470 ++ .../sdk/storage/azqueue/queue_client.go | 255 + .../storage/azqueue/queueerror/error_codes.go | 99 + .../sdk/storage/azqueue/responses.go | 67 + .../sdk/storage/azqueue/sas/account.go | 223 + .../sdk/storage/azqueue/sas/query_params.go | 504 ++ .../sdk/storage/azqueue/sas/service.go | 133 + .../sdk/storage/azqueue/sas/url_parts.go | 98 + .../sdk/storage/azqueue/service_client.go | 223 + .../sdk/storage/azqueue/test-resources.json | 516 ++ .../mgmt/2017-04-01/eventhub/CHANGELOG.md | 2 - .../mgmt/2017-04-01/eventhub/_meta.json | 11 - .../mgmt/2017-04-01/eventhub/client.go | 43 - .../2017-04-01/eventhub/consumergroups.go | 476 -- .../eventhub/disasterrecoveryconfigs.go | 1031 --- .../mgmt/2017-04-01/eventhub/enums.go | 198 - .../mgmt/2017-04-01/eventhub/eventhubs.go | 1089 --- .../mgmt/2017-04-01/eventhub/models.go | 2434 ------ .../mgmt/2017-04-01/eventhub/namespaces.go | 1694 ---- .../mgmt/2017-04-01/eventhub/operations.go | 140 - .../mgmt/2017-04-01/eventhub/regions.go | 155 - .../mgmt/2017-04-01/eventhub/version.go | 19 - .../keyvault/v7.0/keyvault/CHANGELOG.md | 26 - .../services/keyvault/v7.0/keyvault/client.go | 7313 ----------------- .../v7.0/keyvault/dataplane_meta.json | 11 - .../services/keyvault/v7.0/keyvault/enums.go | 229 - .../services/keyvault/v7.0/keyvault/models.go | 
3601 -------- .../keyvault/v7.0/keyvault/version.go | 19 - .../mgmt/2018-03-01/insights/CHANGELOG.md | 2 - .../mgmt/2018-03-01/insights/_meta.json | 11 - .../mgmt/2018-03-01/insights/actiongroups.go | 581 -- .../2018-03-01/insights/activitylogalerts.go | 498 -- .../mgmt/2018-03-01/insights/activitylogs.go | 166 - .../2018-03-01/insights/alertruleincidents.go | 186 - .../mgmt/2018-03-01/insights/alertrules.go | 495 -- .../2018-03-01/insights/autoscalesettings.go | 580 -- .../mgmt/2018-03-01/insights/client.go | 43 - .../2018-03-01/insights/diagnosticsettings.go | 332 - .../insights/diagnosticsettingscategory.go | 180 - .../monitor/mgmt/2018-03-01/insights/enums.go | 596 -- .../2018-03-01/insights/eventcategories.go | 99 - .../mgmt/2018-03-01/insights/logprofiles.go | 418 - .../mgmt/2018-03-01/insights/metricalerts.go | 498 -- .../2018-03-01/insights/metricalertsstatus.go | 186 - .../2018-03-01/insights/metricbaseline.go | 217 - .../2018-03-01/insights/metricdefinitions.go | 109 - .../mgmt/2018-03-01/insights/metrics.go | 152 - .../mgmt/2018-03-01/insights/models.go | 5204 ------------ .../mgmt/2018-03-01/insights/operations.go | 98 - .../insights/scheduledqueryrules.go | 511 -- .../2018-03-01/insights/tenantactivitylogs.go | 168 - .../mgmt/2018-03-01/insights/version.go | 19 - .../azblob/access_conditions.go | 65 - .../azblob/bytes_writer.go | 24 - .../azblob/chunkwriting.go | 215 - .../azblob/common_utils.go | 1 - .../azure-storage-blob-go/azblob/highlevel.go | 569 -- .../azblob/request_common.go | 56 - .../azblob/sas_service.go | 371 - .../azblob/section_writer.go | 47 - .../azblob/service_codes_blob.go | 198 - .../azblob/storage_account_credential.go | 8 - .../azblob/url_append_blob.go | 161 - .../azure-storage-blob-go/azblob/url_blob.go | 363 - .../azblob/url_block_blob.go | 182 - .../azblob/url_container.go | 319 - .../azblob/url_page_blob.go | 276 - .../azblob/url_service.go | 177 - .../azblob/user_delegation_credential.go | 38 - .../azure-storage-blob-go/azblob/version.go | 3 - .../azblob/zc_credential_anonymous.go | 55 - .../azblob/zc_credential_shared_key.go | 205 - .../azblob/zc_credential_token.go | 146 - .../azblob/zc_pipeline.go | 45 - .../azblob/zc_policy_request_log.go | 194 - .../azblob/zc_policy_retry.go | 419 - .../azblob/zc_policy_telemetry.go | 51 - .../azblob/zc_policy_unique_request_id.go | 36 - .../azblob/zc_sas_account.go | 244 - .../azblob/zc_sas_query_params.go | 393 - .../azblob/zc_service_codes_common.go | 134 - .../azblob/zc_storage_error.go | 111 - .../azblob/zc_util_validate.go | 64 - .../azure-storage-blob-go/azblob/zc_uuid.go | 77 - .../azure-storage-blob-go/azblob/zt_doc.go | 89 - .../azblob/zz_generated_append_blob.go | 532 -- .../azblob/zz_generated_blob.go | 2063 ----- .../azblob/zz_generated_block_blob.go | 848 -- .../azblob/zz_generated_client.go | 38 - .../azblob/zz_generated_container.go | 1232 --- .../azblob/zz_generated_models.go | 7302 ---------------- .../azblob/zz_generated_page_blob.go | 1065 --- .../azblob/zz_generated_responder_policy.go | 74 - .../azblob/zz_generated_response_error.go | 95 - .../azblob/zz_generated_service.go | 618 -- .../azblob/zz_generated_validation.go | 367 - .../azblob/zz_generated_version.go | 14 - .../azblob/zz_response_helpers.go | 240 - .../azqueue/parsing_urls.go | 95 - .../azqueue/sas_service.go | 110 - .../azqueue/service_codes_queue.go | 33 - .../azqueue/url_messageid.go | 99 - .../azqueue/url_messages.go | 297 - .../azqueue/url_queue.go | 143 - .../azqueue/url_service.go | 135 - 
.../azure-storage-queue-go/azqueue/version.go | 4 - .../azqueue/zc_credential_anonymous.go | 55 - .../azqueue/zc_credential_shared_key.go | 196 - .../azqueue/zc_credential_token.go | 137 - .../azqueue/zc_pipeline.go | 43 - .../azqueue/zc_policy_request_log.go | 182 - .../azqueue/zc_policy_retry.go | 408 - .../azqueue/zc_policy_telemetry.go | 51 - .../azqueue/zc_policy_unique_request_id.go | 24 - .../azqueue/zc_sas_account.go | 218 - .../azqueue/zc_sas_query_params.go | 211 - .../azqueue/zc_service_codes_common.go | 131 - .../azqueue/zc_storage_error.go | 111 - .../azure-storage-queue-go/azqueue/zc_uuid.go | 77 - .../azure-storage-queue-go/azqueue/zt_doc.go | 79 - .../azqueue/zt_url_service_test.goX | 132 - .../azqueue/zz_generated_client.go | 38 - .../azqueue/zz_generated_message_id.go | 157 - .../azqueue/zz_generated_messages.go | 345 - .../azqueue/zz_generated_models.go | 1348 --- .../azqueue/zz_generated_queue.go | 393 - .../azqueue/zz_generated_responder_policy.go | 74 - .../azqueue/zz_generated_response_error.go | 95 - .../azqueue/zz_generated_service.go | 344 - .../azqueue/zz_generated_validation.go | 367 - .../azqueue/zz_generated_version.go | 14 - .../Azure/go-autorest/autorest/to/LICENSE | 191 - .../Azure/go-autorest/autorest/to/convert.go | 152 - .../autorest/to/go_mod_tidy_hack.go | 24 - .../go-autorest/autorest/validation/LICENSE | 191 - .../go-autorest/autorest/validation/error.go | 48 - .../autorest/validation/go_mod_tidy_hack.go | 24 - .../autorest/validation/validation.go | 406 - vendor/github.com/devigned/tab/.gitignore | 14 - vendor/github.com/devigned/tab/Makefile | 86 - vendor/github.com/devigned/tab/README.md | 49 - vendor/github.com/devigned/tab/trace.go | 200 - vendor/github.com/mattn/go-ieproxy/.gitignore | 1 - vendor/github.com/mattn/go-ieproxy/README.md | 51 - vendor/github.com/mattn/go-ieproxy/ieproxy.go | 56 - .../mattn/go-ieproxy/ieproxy_darwin.go | 123 - .../mattn/go-ieproxy/ieproxy_unix.go | 15 - .../mattn/go-ieproxy/ieproxy_windows.go | 219 - .../mattn/go-ieproxy/kernel32_data_windows.go | 19 - .../github.com/mattn/go-ieproxy/pac_darwin.go | 141 - .../github.com/mattn/go-ieproxy/pac_unix.go | 8 - .../mattn/go-ieproxy/pac_windows.go | 72 - .../mattn/go-ieproxy/proxy_middleman.go | 11 - .../go-ieproxy/proxy_middleman_darwin.go | 43 - .../mattn/go-ieproxy/proxy_middleman_unix.go | 14 - .../go-ieproxy/proxy_middleman_windows.go | 52 - vendor/github.com/mattn/go-ieproxy/utils.go | 23 - .../mattn/go-ieproxy/winhttp_data_windows.go | 51 - vendor/modules.txt | 95 +- 497 files changed, 55752 insertions(+), 67789 deletions(-) delete mode 100644 pkg/scalers/azure/azure_monitor.go delete mode 100644 pkg/scalers/azure/azure_monitor_test.go delete mode 100644 pkg/scalers/azure/azure_queue.go delete mode 100644 pkg/scalers/azure/azure_queue_test.go create mode 100644 tests/scalers/azure/helper/EventHubHelper.go create mode 100644 tests/scalers/azure/helper/StorageHelper.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/.gitignore delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/Makefile delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/README.md delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/SECURITY.md delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/aad/jwt.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/azure-pipelines.yml delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/cbs/cbs.go delete mode 100644 
vendor/github.com/Azure/azure-amqp-common-go/v4/changelog.md delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/conn/conn.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/internal/tracing/tracing.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/internal/version.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/ptrs.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/retry.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/rpc/rpc.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/sas/sas.go delete mode 100644 vendor/github.com/Azure/azure-amqp-common-go/v4/uuid/uuid.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/.gitignore delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/Makefile delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/SECURITY.md delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/amqp_mgmt.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/atom/atom.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/azuredeploy.tf delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/batch.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/changelog.md delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/errors.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/event.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/http_mgmt.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/hub.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/namespace.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/persist/checkpoint.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/persist/file.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/persist/persist.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/readme.md delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/receiver.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/sender.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/session.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/tracing.go delete mode 100644 vendor/github.com/Azure/azure-event-hubs-go/v3/version.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go delete mode 100644 vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/CHANGELOG.md rename vendor/github.com/{mattn/go-ieproxy/LICENSE => Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/LICENSE.txt} (86%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/amqp_message.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/checkpoint_store.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/connection_string_properties.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/consumer_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/event_data.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/event_data_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpInterfaces.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqp_fakes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth/token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/cbs.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh/eh_internal.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/connection_string_properties.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/log_events.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/retry_options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/websocket_conn_params.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links_recover.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace_eh.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/rpc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas/sas.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth/token_provider.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils/retrier.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/mgmt.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/migrationguide.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/partition_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_load_balancer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_partition_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/producer_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/sample.env create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/test-resources.bicep create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/CHANGELOG.md rename vendor/github.com/Azure/azure-sdk-for-go/{ => sdk/monitor/azquery}/LICENSE.txt (92%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/MIGRATION.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/cloud_config.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/custom_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/logs_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/metrics_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/test-resources.bicep create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/version.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/CHANGELOG.md rename vendor/github.com/{devigned/tab/LICENSE => Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/LICENSE.txt} (93%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/client_factory.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/clusters_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/configuration_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/consumergroups_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/eventhubs_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/namespaces_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/operations_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privatelinkresources_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/schemaregistry_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/CHANGELOG.md rename vendor/github.com/Azure/{azure-pipeline-go/LICENSE => azure-sdk-for-go/sdk/security/keyvault/azsecrets/LICENSE.txt} (100%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/TROUBLESHOOTING.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/autorest.md create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/custom_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/test-resources.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/time_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md rename vendor/github.com/Azure/{azure-storage-blob-go/LICENSE => azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt} (100%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml rename vendor/github.com/Azure/azure-sdk-for-go/{version/version.go => sdk/security/keyvault/internal/constants.go} (62%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md rename vendor/github.com/Azure/{azure-storage-queue-go/LICENSE => azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt} (100%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go rename vendor/github.com/Azure/{azure-storage-blob-go/azblob/zc_retry_reader.go => azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go} (55%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go create 
mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go rename vendor/github.com/Azure/{azure-storage-blob-go/azblob/parsing_urls.go => azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go} (65%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/CHANGELOG.md rename vendor/github.com/Azure/{azure-event-hubs-go/v3/LICENSE => azure-sdk-for-go/sdk/storage/azqueue/LICENSE.txt} (98%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/assets.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base/clients.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/access_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/shared_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/version.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/queue_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messageid_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_queue_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_service_client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc1123.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_xml_helper.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queue_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror/error_codes.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/responses.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/account.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/query_params.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/service.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/url_parts.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/test-resources.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/CHANGELOG.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/_meta.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/consumergroups.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/disasterrecoveryconfigs.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/enums.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/eventhubs.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/namespaces.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/operations.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/regions.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/dataplane_meta.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md 
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go delete mode 
100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go delete mode 100644 
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/parsing_urls.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/sas_service.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/service_codes_queue.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messageid.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messages.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_queue.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_service.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/version.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_anonymous.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_shared_key.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_token.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_pipeline.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_request_log.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_retry.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_telemetry.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_unique_request_id.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_account.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_query_params.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_service_codes_common.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_storage_error.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_uuid.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_doc.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_url_service_test.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_client.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_message_id.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_messages.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_models.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_queue.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_responder_policy.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_response_error.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_service.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_validation.go delete mode 100644 vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_version.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/to/convert.go delete mode 100644
vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/error.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go delete mode 100644 vendor/github.com/Azure/go-autorest/autorest/validation/validation.go delete mode 100644 vendor/github.com/devigned/tab/.gitignore delete mode 100644 vendor/github.com/devigned/tab/Makefile delete mode 100644 vendor/github.com/devigned/tab/README.md delete mode 100644 vendor/github.com/devigned/tab/trace.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/.gitignore delete mode 100644 vendor/github.com/mattn/go-ieproxy/README.md delete mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/pac_darwin.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/pac_unix.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/pac_windows.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/utils.go delete mode 100644 vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go diff --git a/.golangci.yml b/.golangci.yml index 3ebf499edb5..fcfacf552c5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -80,10 +80,6 @@ issues: - path: azure/azure_app_insights.go linters: - bodyclose - # We need to use Golang 1.18 before update the SDK due to it's a prerrequisite - - linters: - - staticcheck - text: "SA1019: \"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" # Exclude for stan_scaler and nats_jetstream_scaler, reason: # pkg/scalers/nats_jetstream_scaler.go:109: 109-153 lines are duplicate of `pkg/scalers/stan_scaler.go:83-127` (dupl) - path: nats_jetstream_scaler.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 82f84f77272..619958b3204 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,7 @@ New deprecation(s): - **General**: Added Pre Regex check before building image in e2e test ([#5783](https://github.com/kedacore/keda/issues/5783)) - **General**: Reduce the number of ScaledObject.Status updates in the fallback ([#5624](https://github.com/kedacore/keda/issues/5624)) +- **Azure Services**: Migrated to `github.com/Azure/azure-sdk-for-go` ([#5470](https://github.com/kedacore/keda/issues/5470)) - **IBM MQ Scaler**: Adding e2e test ([#1287](https://github.com/kedacore/keda/issues/1287)) ## v2.14.0 diff --git a/Makefile b/Makefile index f3f415842e9..890d09a326a 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ all: build ################################################## ##@ Test -# .PHONY: install-test-deps -# install-test-deps: -# go install github.com/jstemmer/go-junit-report/v2@latest +.PHONY: install-test-deps +install-test-deps: + go install gotest.tools/gotestsum@latest .PHONY: test -test: manifests generate fmt vet envtest go-junit-report ## Run 
tests and export the result to junit format. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test -v 2>&1 ./... -coverprofile cover.out | $(GO_JUNIT_REPORT) -iocopy -set-exit-code -out report.xml +test: manifests generate fmt vet envtest install-test-deps ## Run tests and export the result to junit format. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" gotestsum --format standard-quiet --rerun-fails --junitfile report.xml .PHONY: az-login: diff --git a/go.mod b/go.mod index 56b5205cd04..bd0c6ebc1ec 100644 --- a/go.mod +++ b/go.mod @@ -9,14 +9,17 @@ require ( cloud.google.com/go/storage v1.40.0 dario.cat/mergo v1.0.0 github.com/Azure/azure-amqp-common-go/v4 v4.2.0 - github.com/Azure/azure-event-hubs-go/v3 v3.6.2 github.com/Azure/azure-kusto-go v0.15.2 - github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 - github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/Azure/azure-storage-queue-go v0.0.0-20230927153703-648530c9aaf2 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 + github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 + github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 @@ -115,6 +118,8 @@ require ( sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 ) +require filippo.io/edwards25519 v1.1.0 // indirect + // Remove this when they merge the PR and cut a release https://github.com/open-policy-agent/cert-controller/pull/202 replace github.com/open-policy-agent/cert-controller => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 @@ -167,17 +172,13 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/iam v1.1.7 // indirect code.cloudfoundry.org/clock v1.1.0 // indirect - filippo.io/edwards25519 v1.1.0 // indirect - github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/DataDog/zstd v1.5.5 // indirect @@ -208,7 +209,6 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 
v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/devigned/tab v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/eapache/go-resiliency v1.6.0 // indirect @@ -288,7 +288,6 @@ require ( github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.11 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect diff --git a/go.sum b/go.sum index e7c5977bb51..c87806ce063 100644 --- a/go.sum +++ b/go.sum @@ -1334,15 +1334,9 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-amqp-common-go/v4 v4.2.0 h1:q/jLx1KJ8xeI8XGfkOWMN9XrXzAfVTkyvCxPvHCjd2I= github.com/Azure/azure-amqp-common-go/v4 v4.2.0/go.mod h1:GD3m/WPPma+621UaU6KNjKEo5Hl09z86viKwQjTpV0Q= -github.com/Azure/azure-event-hubs-go/v3 v3.6.2 h1:7rNj1/iqS/i3mUKokA2n2eMYO72TB7lO7OmpbKoakKY= -github.com/Azure/azure-event-hubs-go/v3 v3.6.2/go.mod h1:n+ocYr9j2JCLYqUqz9eI+lx/TEAtL/g6rZzyTFSuIpc= github.com/Azure/azure-kusto-go v0.15.2 h1:OlABJilic9TythSgWW6i8Fd0SgNTg0t9jBu6WVsaixM= github.com/Azure/azure-kusto-go v0.15.2/go.mod h1:9F2zvXH8B6eWzgI1S4k1ZXAIufnBZ1bv1cW1kB1n3D0= -github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= @@ -1350,16 +1344,32 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJc github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 h1:d7S13DPk63SvBJfSUiMJJ26tRsvrBumkLPEfQEAarGk= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0/go.mod h1:7e/gsXp4INB4k/vg0h3UOkYpDK6oZqctxr+L05FGybg= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 h1:QISzMrspEvZj4zrrN2wlNwfum5RmnKQhQNiSujwH7oU= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0/go.mod h1:xNjFERdhyMqZncbNJSPBsTCddk5kwsUVUzELQPMj/LA= -github.com/Azure/azure-storage-blob-go 
v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= -github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/azure-storage-queue-go v0.0.0-20230927153703-648530c9aaf2 h1:G6pzVaX36QLfGvbLSAt8Leb81MiONYT0L03lhABjrPg= -github.com/Azure/azure-storage-queue-go v0.0.0-20230927153703-648530c9aaf2/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 h1:rTfKOCZGy5ViVrlA74ZPE99a+SgoEE2K/yg3RyW9dFA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 h1:ANFaLubuHo9lLoee/1La180t1frTwd+0FcaQh2GTlg8= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0/go.mod h1:x/yvhJANijv4JJOq6ql0QKMY2pP9zmjeWcSrZsRn9RY= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1/go.mod h1:vMGz6NOUGJ9h5ONl2kkyaqq5E0g7s4CHNSrXN5fl8UY= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= +github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 h1:l+LIDHsZkFBiipIKhOn3m5/2MX4bwNwHYWyNulPaTis= +github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0/go.mod h1:BjVVBLUiZ/qR2a4PAhjs8uGXNfStD0tSxgxCMfcVRT8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 h1:+dggnR89/BIIlRlQ6d19dkhhdd/mQUiQbXhyHUFiB4w= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0/go.mod h1:tI9M2Q/ueFi287QRkdrhb9LHm6ZnXgkVYLRC3FhYkPw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 h1:h4Zxgmi9oyZL2l8jeg1iRTqPloHktywWcu0nlJmo1tA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0/go.mod h1:LgLGXawqSreJz135Elog0ywTJDsm0Hz2k+N+6ZK35u8= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 h1:lJwNFV+xYjHREUTHJKx/ZF6CJSt9znxmLw9DqSTvyRU= +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0/go.mod 
h1:GfT0aGew8Qj5yiQVqOO5v7N8fanbJGyUoHqXg56qcVY= github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= @@ -1367,7 +1377,6 @@ github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= @@ -1382,10 +1391,6 @@ github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSY github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= -github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= @@ -1578,13 +1583,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= -github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= -github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -1637,7 +1639,6 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -1857,7 +1858,6 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1930,15 +1930,16 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= +github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -2022,8 
+2023,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jstemmer/go-junit-report/v2 v2.1.0 h1:X3+hPYlSczH9IMIpSC9CQSZA0L+BipYafciZUWHEmsc= -github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -2075,11 +2074,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= -github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= @@ -2220,8 +2217,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= @@ -2287,6 +2284,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
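The go.mod/go.sum churn above swaps the deprecated track-1 packages (azure-storage-blob-go, azure-storage-queue-go, azure-event-hubs-go) for the current azure-sdk-for-go modules. A minimal sketch of client construction against the replacement modules; the connection string is a placeholder and this snippet is illustrative, not part of the patch:

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
)

func main() {
	// Placeholder connection string; any well-formed value is enough to
	// construct the clients, since no network call happens here.
	conn := "DefaultEndpointsProtocol=https;AccountName=name;AccountKey=key=;EndpointSuffix=core.windows.net"

	// azblob.Client replaces the azure-storage-blob-go pipeline/ServiceURL pair.
	blobClient, err := azblob.NewClientFromConnectionString(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = blobClient

	// azqueue.ServiceClient replaces the azure-storage-queue-go ServiceURL.
	queueClient, err := azqueue.NewServiceClientFromConnectionString(conn, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = queueClient
}

Both constructors only parse the connection string; the first service operation is what actually touches the network.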
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -2591,7 +2589,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3114,7 +3111,6 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -3241,8 +3237,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/eventemitter/azure_event_grid_topic_handler.go b/pkg/eventemitter/azure_event_grid_topic_handler.go index 48be231e9b5..237ac09cc52 100644 --- a/pkg/eventemitter/azure_event_grid_topic_handler.go +++ b/pkg/eventemitter/azure_event_grid_topic_handler.go @@ -58,7 +58,7 @@ func NewAzureEventGridTopicHandler(context context.Context, clusterName string, } client, err = publisher.NewClientWithSharedKeyCredential(spec.Endpoint, azcore.NewKeyCredential(authParams["accessKey"]), nil) case kedav1alpha1.PodIdentityProviderAzureWorkload: - creds, chainedErr := azure.NewChainedCredential(logger, podIdentity.GetIdentityID(), 
podIdentity.GetIdentityTenantID(), podIdentity.Provider) + creds, chainedErr := azure.NewChainedCredential(logger, podIdentity) if chainedErr != nil { err = chainedErr break diff --git a/pkg/scalers/azure/azure_azidentity_chain.go b/pkg/scalers/azure/azure_azidentity_chain.go index 31f626f2ae2..8f7d54217e9 100644 --- a/pkg/scalers/azure/azure_azidentity_chain.go +++ b/pkg/scalers/azure/azure_azidentity_chain.go @@ -11,7 +11,7 @@ import ( "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) -func NewChainedCredential(logger logr.Logger, identityID, identityTenantID string, podIdentity v1alpha1.PodIdentityProvider) (*azidentity.ChainedTokenCredential, error) { +func NewChainedCredential(logger logr.Logger, podIdentity v1alpha1.AuthPodIdentity) (*azidentity.ChainedTokenCredential, error) { var creds []azcore.TokenCredential // Used for local debug based on az-cli user @@ -26,9 +26,9 @@ func NewChainedCredential(logger logr.Logger, identityID, identityTenantID strin } } - switch podIdentity { + switch podIdentity.Provider { case v1alpha1.PodIdentityProviderAzureWorkload: - wiCred, err := NewADWorkloadIdentityCredential(identityID, identityTenantID) + wiCred, err := NewADWorkloadIdentityCredential(podIdentity.GetIdentityID(), podIdentity.GetIdentityTenantID()) if err != nil { logger.Error(err, "error starting azure workload-identity token provider") } else { @@ -36,7 +36,7 @@ func NewChainedCredential(logger logr.Logger, identityID, identityTenantID strin creds = append(creds, wiCred) } default: - return nil, fmt.Errorf("pod identity %s not supported for azure credentials chain", podIdentity) + return nil, fmt.Errorf("pod identity %s not supported for azure credentials chain", podIdentity.Provider) } // Create the chained credential based on the previous 3 diff --git a/pkg/scalers/azure/azure_blob.go b/pkg/scalers/azure/azure_blob.go index 48150f586eb..ef878d19ebf 100644 --- a/pkg/scalers/azure/azure_blob.go +++ b/pkg/scalers/azure/azure_blob.go @@ -19,10 +19,9 @@ package azure import ( "context" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/gobwas/glob" - - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) type BlobMetadata struct { @@ -30,7 +29,7 @@ type BlobMetadata struct { ActivationTargetBlobCount int64 BlobContainerName string BlobDelimiter string - BlobPrefix string + BlobPrefix *string Connection string AccountName string EndpointSuffix string @@ -39,39 +38,37 @@ type BlobMetadata struct { } // GetAzureBlobListLength returns the count of the blobs in blob container in int -func GetAzureBlobListLength(ctx context.Context, podIdentity kedav1alpha1.AuthPodIdentity, meta *BlobMetadata) (int64, error) { - credential, endpoint, err := ParseAzureStorageBlobConnection(ctx, podIdentity, meta.Connection, meta.AccountName, meta.EndpointSuffix) - if err != nil { - return -1, err - } - - listBlobsSegmentOptions := azblob.ListBlobsSegmentOptions{ - Prefix: meta.BlobPrefix, - } - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(meta.BlobContainerName) - +func GetAzureBlobListLength(ctx context.Context, blobClient *azblob.Client, meta *BlobMetadata) (int64, error) { + containerClient := blobClient.ServiceClient().NewContainerClient(meta.BlobContainerName) if meta.GlobPattern != nil { - props, err := containerURL.ListBlobsFlatSegment(ctx, 
azblob.Marker{}, azblob.ListBlobsSegmentOptions{}) - if err != nil { - return -1, err - } - - var count int64 globPattern := *meta.GlobPattern - for _, blobItem := range props.Segment.BlobItems { - if globPattern.Match(blobItem.Name) { - count++ + var count int64 + flatPager := containerClient.NewListBlobsFlatPager(&azblob.ListBlobsFlatOptions{ + Prefix: meta.BlobPrefix, + }) + for flatPager.More() { + resp, err := flatPager.NextPage(ctx) + if err != nil { + return -1, err + } + for _, blobItem := range resp.Segment.BlobItems { + if blobItem.Name != nil && globPattern.Match(*blobItem.Name) { + count++ + } } } return count, nil } - - props, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, meta.BlobDelimiter, listBlobsSegmentOptions) - if err != nil { - return -1, err + hierarchyPager := containerClient.NewListBlobsHierarchyPager(meta.BlobDelimiter, &container.ListBlobsHierarchyOptions{ + Prefix: meta.BlobPrefix, + }) + var count int64 + for hierarchyPager.More() { + resp, err := hierarchyPager.NextPage(ctx) + if err != nil { + return -1, err + } + count += int64(len(resp.Segment.BlobItems)) } - - return int64(len(props.Segment.BlobItems)), nil + return count, nil } diff --git a/pkg/scalers/azure/azure_blob_test.go b/pkg/scalers/azure/azure_blob_test.go index 262cd91f163..4c5e9b7c7cd 100644 --- a/pkg/scalers/azure/azure_blob_test.go +++ b/pkg/scalers/azure/azure_blob_test.go @@ -2,41 +2,18 @@ package azure import ( "context" - "encoding/base64" - "errors" "testing" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/stretchr/testify/assert" ) func TestGetBlobLength(t *testing.T) { - meta := BlobMetadata{Connection: "", BlobContainerName: "blobContainerName", AccountName: "", BlobDelimiter: "", BlobPrefix: "", EndpointSuffix: ""} - length, err := GetAzureBlobListLength(context.TODO(), kedav1alpha1.AuthPodIdentity{}, &meta) - if length != -1 { - t.Error("Expected length to be -1, but got", length) - } + meta := BlobMetadata{Connection: "", BlobContainerName: "blobContainerName", AccountName: "", BlobDelimiter: "", BlobPrefix: nil, EndpointSuffix: ""} + blobClient, err := azblob.NewClientFromConnectionString("DefaultEndpointsProtocol=https;AccountName=name;AccountKey=key=;EndpointSuffix=core.windows.net", nil) + assert.NoError(t, err) - if err == nil { - t.Error("Expected error for empty connection string, but got nil") - } - - if !errors.Is(err, ErrAzureConnectionStringKeyName) { - t.Error("Expected error to contain parsing error message, but got", err.Error()) - } - - meta.Connection = "DefaultEndpointsProtocol=https;AccountName=name;AccountKey=key==;EndpointSuffix=core.windows.net" - length, err = GetAzureBlobListLength(context.TODO(), kedav1alpha1.AuthPodIdentity{}, &meta) - - if length != -1 { - t.Error("Expected length to be -1, but got", length) - } - - if err == nil { - t.Error("Expected error for empty connection string, but got nil") - } - - var base64Error base64.CorruptInputError - if !errors.As(err, &base64Error) { - t.Error("Expected error to contain base64 error message, but got", err.Error()) - } + length, err := GetAzureBlobListLength(context.TODO(), blobClient, &meta) + assert.Equal(t, int64(-1), length) + assert.Error(t, err) } diff --git a/pkg/scalers/azure/azure_cloud_environment.go b/pkg/scalers/azure/azure_cloud_environment.go index fef9e754266..3a86c5cc0c8 100644 --- a/pkg/scalers/azure/azure_cloud_environment.go +++ b/pkg/scalers/azure/azure_cloud_environment.go @@ -4,9 
+4,16 @@ import ( "fmt" "strings" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" az "github.com/Azure/go-autorest/autorest/azure" ) +var AzureClouds = map[string]cloud.Configuration{ + "AZUREPUBLICCLOUD": cloud.AzurePublic, + "AZUREUSGOVERNMENTCLOUD": cloud.AzureGovernment, + "AZURECHINACLOUD": cloud.AzureChina, +} + const ( DefaultCloud = "azurePublicCloud" diff --git a/pkg/scalers/azure/azure_data_explorer.go b/pkg/scalers/azure/azure_data_explorer.go index ff60d624ab2..caa938e40a6 100644 --- a/pkg/scalers/azure/azure_data_explorer.go +++ b/pkg/scalers/azure/azure_data_explorer.go @@ -91,7 +91,7 @@ func getDataExplorerAuthConfig(metadata *DataExplorerMetadata) (*kusto.Connectio case kedav1alpha1.PodIdentityProviderAzureWorkload: azureDataExplorerLogger.V(1).Info(fmt.Sprintf("Creating Azure Data Explorer Client using podIdentity %s", metadata.PodIdentity.Provider)) - creds, chainedErr := NewChainedCredential(azureDataExplorerLogger, metadata.PodIdentity.GetIdentityID(), metadata.PodIdentity.GetIdentityTenantID(), metadata.PodIdentity.Provider) + creds, chainedErr := NewChainedCredential(azureDataExplorerLogger, metadata.PodIdentity) if chainedErr != nil { return nil, chainedErr } diff --git a/pkg/scalers/azure/azure_eventhub.go b/pkg/scalers/azure/azure_eventhub.go index 23d5dff7b53..be10d3c0188 100644 --- a/pkg/scalers/azure/azure_eventhub.go +++ b/pkg/scalers/azure/azure_eventhub.go @@ -1,15 +1,15 @@ package azure import ( - "context" "errors" "fmt" "strings" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs" + "github.com/go-logr/logr" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + kedautil "github.com/kedacore/keda/v2/pkg/util" ) // EventHubInfo to keep event hub connection and resources @@ -24,41 +24,36 @@ type EventHubInfo struct { EventHubName string CheckpointStrategy string ServiceBusEndpointSuffix string - ActiveDirectoryEndpoint string - EventHubResourceURL string PodIdentity kedav1alpha1.AuthPodIdentity } -const ( - DefaultEventhubResourceURL = "https://eventhubs.azure.net/" -) - // GetEventHubClient returns eventhub client -func GetEventHubClient(ctx context.Context, info EventHubInfo) (*eventhub.Hub, error) { +func GetEventHubClient(info EventHubInfo, logger logr.Logger) (*azeventhubs.ProducerClient, error) { + opts := &azeventhubs.ProducerClientOptions{TLSConfig: kedautil.CreateTLSClientConfig(false)} + switch info.PodIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: - // The user wants to use a connectionstring, not a pod identity - hub, err := eventhub.NewHubFromConnectionString(info.EventHubConnection) + hub, err := azeventhubs.NewProducerClientFromConnectionString(info.EventHubConnection, info.EventHubName, opts) if err != nil { return nil, fmt.Errorf("failed to create hub client: %w", err) } return hub, nil case kedav1alpha1.PodIdentityProviderAzureWorkload: - // User wants to use AAD Workload Identity - env := azure.Environment{ActiveDirectoryEndpoint: info.ActiveDirectoryEndpoint, ServiceBusEndpointSuffix: info.ServiceBusEndpointSuffix} - hubEnvOptions := eventhub.HubWithEnvironment(env) - provider := NewAzureADWorkloadIdentityTokenProvider(ctx, info.PodIdentity.GetIdentityID(), info.PodIdentity.GetIdentityTenantID(), info.PodIdentity.GetIdentityAuthorityHost(), info.EventHubResourceURL) + creds, chainedErr := NewChainedCredential(logger, info.PodIdentity) + if chainedErr != nil { + return nil, chainedErr + } - 
return eventhub.NewHub(info.Namespace, info.EventHubName, provider, hubEnvOptions) + return azeventhubs.NewProducerClient(fmt.Sprintf("%s.%s", info.Namespace, info.ServiceBusEndpointSuffix), info.EventHubName, creds, opts) } - return nil, fmt.Errorf("event hub does not support pod identity %v", info.PodIdentity) + return nil, fmt.Errorf("event hub does not support pod identity %v", info.PodIdentity.Provider) } -// ParseAzureEventHubConnectionString parses Event Hub connection string into (namespace, name) +// parseAzureEventHubConnectionString parses Event Hub connection string into (namespace, name) // Connection string should be in following format: // Endpoint=sb://eventhub-namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=secretKey123;EntityPath=eventhub-name -func ParseAzureEventHubConnectionString(connectionString string) (string, string, error) { +func parseAzureEventHubConnectionString(connectionString string) (string, string, error) { parts := strings.Split(connectionString, ";") var eventHubNamespace, eventHubName string @@ -90,7 +85,7 @@ func getHubAndNamespace(info EventHubInfo) (string, string, error) { var eventHubName string var err error if info.EventHubConnection != "" { - eventHubNamespace, eventHubName, err = ParseAzureEventHubConnectionString(info.EventHubConnection) + eventHubNamespace, eventHubName, err = parseAzureEventHubConnectionString(info.EventHubConnection) if err != nil { return "", "", err } diff --git a/pkg/scalers/azure/azure_eventhub_checkpoint.go b/pkg/scalers/azure/azure_eventhub_checkpoint.go index d14d0eec529..657e42ec0d9 100644 --- a/pkg/scalers/azure/azure_eventhub_checkpoint.go +++ b/pkg/scalers/azure/azure_eventhub_checkpoint.go @@ -26,9 +26,7 @@ import ( "strings" "dario.cat/mergo" - "github.com/Azure/azure-storage-blob-go/azblob" - - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" ) // goCheckpoint struct to adapt goSdk Checkpoint @@ -52,8 +50,8 @@ type pythonCheckpoint struct { } type checkpointer interface { - resolvePath(info EventHubInfo) (*url.URL, error) - extractCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) + resolvePath(info EventHubInfo) (string, string, error) + extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) } type azureFunctionCheckpointer struct { @@ -86,9 +84,9 @@ func NewCheckpoint(sequenceNumber int64) Checkpoint { } // GetCheckpointFromBlobStorage reads depending of the CheckpointStrategy the checkpoint from a azure storage -func GetCheckpointFromBlobStorage(ctx context.Context, info EventHubInfo, partitionID string) (Checkpoint, error) { +func GetCheckpointFromBlobStorage(ctx context.Context, blobStorageClient *azblob.Client, info EventHubInfo, partitionID string) (Checkpoint, error) { checkpointer := newCheckpointer(info, partitionID) - return getCheckpoint(ctx, info, checkpointer) + return getCheckpoint(ctx, blobStorageClient, info, checkpointer) } func newCheckpointer(info EventHubInfo, partitionID string) checkpointer { @@ -122,22 +120,21 @@ func newCheckpointer(info EventHubInfo, partitionID string) checkpointer { } // resolve path for AzureFunctionCheckpointer -func (checkpointer *azureFunctionCheckpointer) resolvePath(info EventHubInfo) (*url.URL, error) { +func (checkpointer *azureFunctionCheckpointer) resolvePath(info EventHubInfo) (string, string, error) { eventHubNamespace, eventHubName, err := getHubAndNamespace(info) if err != nil { - return nil, err + return 
"", "", err } - path, err := url.Parse(fmt.Sprintf("/%s/%s/%s/%s/%s", checkpointer.containerName, eventHubNamespace, eventHubName, info.EventHubConsumerGroup, checkpointer.partitionID)) - if err != nil { - return nil, err + path := fmt.Sprintf("%s/%s/%s/%s", eventHubNamespace, eventHubName, info.EventHubConsumerGroup, checkpointer.partitionID) + if _, err := url.Parse(path); err != nil { + return "", "", err } - - return path, nil + return checkpointer.containerName, path, nil } // extract checkpoint for AzureFunctionCheckpointer -func (checkpointer *azureFunctionCheckpointer) extractCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { +func (checkpointer *azureFunctionCheckpointer) extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { var checkpoint Checkpoint err := readToCheckpointFromBody(get, &checkpoint) if err != nil { @@ -148,61 +145,54 @@ func (checkpointer *azureFunctionCheckpointer) extractCheckpoint(get *azblob.Dow } // resolve path for blobMetadataCheckpointer -func (checkpointer *blobMetadataCheckpointer) resolvePath(info EventHubInfo) (*url.URL, error) { +func (checkpointer *blobMetadataCheckpointer) resolvePath(info EventHubInfo) (string, string, error) { eventHubNamespace, eventHubName, err := getHubAndNamespace(info) if err != nil { - return nil, err + return "", "", err } - path, err := url.Parse(fmt.Sprintf("/%s/%s/%s/%s/checkpoint/%s", checkpointer.containerName, eventHubNamespace, eventHubName, strings.ToLower(info.EventHubConsumerGroup), checkpointer.partitionID)) - if err != nil { - return nil, err + path := fmt.Sprintf("%s/%s/%s/checkpoint/%s", eventHubNamespace, eventHubName, strings.ToLower(info.EventHubConsumerGroup), checkpointer.partitionID) + if _, err := url.Parse(path); err != nil { + return "", "", err } - - return path, nil + return checkpointer.containerName, path, nil } // extract checkpoint for blobMetadataCheckpointer -func (checkpointer *blobMetadataCheckpointer) extractCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { +func (checkpointer *blobMetadataCheckpointer) extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { return getCheckpointFromStorageMetadata(get, checkpointer.partitionID) } // resolve path for goSdkCheckpointer -func (checkpointer *goSdkCheckpointer) resolvePath(info EventHubInfo) (*url.URL, error) { - path, err := url.Parse(fmt.Sprintf("/%s/%s", info.BlobContainer, checkpointer.partitionID)) - if err != nil { - return nil, err - } - - return path, nil +func (checkpointer *goSdkCheckpointer) resolvePath(info EventHubInfo) (string, string, error) { + return info.BlobContainer, checkpointer.partitionID, nil } // resolve path for daprCheckpointer -func (checkpointer *daprCheckpointer) resolvePath(info EventHubInfo) (*url.URL, error) { +func (checkpointer *daprCheckpointer) resolvePath(info EventHubInfo) (string, string, error) { _, eventHubName, err := getHubAndNamespace(info) if err != nil { - return nil, err + return "", "", err } - path, err := url.Parse(fmt.Sprintf("/%s/dapr-%s-%s-%s", info.BlobContainer, eventHubName, info.EventHubConsumerGroup, checkpointer.partitionID)) - if err != nil { - return nil, err + path := fmt.Sprintf("dapr-%s-%s-%s", eventHubName, info.EventHubConsumerGroup, checkpointer.partitionID) + if _, err := url.Parse(path); err != nil { + return "", "", err } - - return path, nil + return info.BlobContainer, path, nil } // extract checkpoint for DaprCheckpointer -func (checkpointer *daprCheckpointer) extractCheckpoint(get *azblob.DownloadResponse) 
(Checkpoint, error) { +func (checkpointer *daprCheckpointer) extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { return newGoSdkCheckpoint(get) } // extract checkpoint for goSdkCheckpointer -func (checkpointer *goSdkCheckpointer) extractCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { +func (checkpointer *goSdkCheckpointer) extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { return newGoSdkCheckpoint(get) } -func newGoSdkCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { +func newGoSdkCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { var checkpoint goCheckpoint err := readToCheckpointFromBody(get, &checkpoint) if err != nil { @@ -216,19 +206,18 @@ func newGoSdkCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { } // resolve path for DefaultCheckpointer -func (checkpointer *defaultCheckpointer) resolvePath(info EventHubInfo) (*url.URL, error) { - path, _ := url.Parse(fmt.Sprintf("/%s/%s/%s", info.BlobContainer, info.EventHubConsumerGroup, checkpointer.partitionID)) - - return path, nil +func (checkpointer *defaultCheckpointer) resolvePath(info EventHubInfo) (string, string, error) { + path := fmt.Sprintf("%s/%s", info.EventHubConsumerGroup, checkpointer.partitionID) + return info.BlobContainer, path, nil } // extract checkpoint with deprecated Python sdk checkpoint for backward compatibility -func (checkpointer *defaultCheckpointer) extractCheckpoint(get *azblob.DownloadResponse) (Checkpoint, error) { +func (checkpointer *defaultCheckpointer) extractCheckpoint(get *azblob.DownloadStreamResponse) (Checkpoint, error) { var checkpoint Checkpoint var pyCheckpoint pythonCheckpoint blobData := &bytes.Buffer{} - reader := get.Body(azblob.RetryReaderOptions{}) + reader := get.Body if _, err := blobData.ReadFrom(reader); err != nil { return Checkpoint{}, fmt.Errorf("failed to read blob data: %w", err) } @@ -247,69 +236,47 @@ func (checkpointer *defaultCheckpointer) extractCheckpoint(get *azblob.DownloadR return checkpoint, err } -func getCheckpoint(ctx context.Context, info EventHubInfo, checkpointer checkpointer) (Checkpoint, error) { - var podIdentity = info.PodIdentity - - // For back-compat, prefer a connection string over pod identity when present - if len(info.StorageConnection) != 0 { - podIdentity.Provider = kedav1alpha1.PodIdentityProviderNone - } - - if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzureWorkload { - if len(info.StorageAccountName) == 0 { - return Checkpoint{}, fmt.Errorf("storageAccountName not supplied when PodIdentity authentication is enabled") - } - } - - blobCreds, storageEndpoint, err := ParseAzureStorageBlobConnection(ctx, podIdentity, info.StorageConnection, info.StorageAccountName, info.BlobStorageEndpoint) - - if err != nil { - return Checkpoint{}, err - } - - path, err := checkpointer.resolvePath(info) +func getCheckpoint(ctx context.Context, blobStorageClient *azblob.Client, info EventHubInfo, checkpointer checkpointer) (Checkpoint, error) { + container, path, err := checkpointer.resolvePath(info) if err != nil { return Checkpoint{}, err } - baseURL := storageEndpoint.ResolveReference(path) - - get, err := downloadBlob(ctx, baseURL, blobCreds) + get, err := blobStorageClient.DownloadStream(ctx, container, path, nil) if err != nil { return Checkpoint{}, err } - return checkpointer.extractCheckpoint(get) + return checkpointer.extractCheckpoint(&get) } -func getCheckpointFromStorageMetadata(get *azblob.DownloadResponse, partitionID string) (Checkpoint, 
error) { +func getCheckpointFromStorageMetadata(get *azblob.DownloadStreamResponse, partitionID string) (Checkpoint, error) { checkpoint := Checkpoint{ PartitionID: partitionID, } - metadata := get.NewMetadata() + metadata := get.Metadata - if sequencenumber, ok := metadata["sequencenumber"]; ok { - if !ok { - if sequencenumber, ok = metadata["Sequencenumber"]; !ok { - return Checkpoint{}, fmt.Errorf("sequencenumber on blob not found") - } - } - - if sn, err := strconv.ParseInt(sequencenumber, 10, 64); err == nil { - checkpoint.SequenceNumber = sn - } else { - return Checkpoint{}, fmt.Errorf("sequencenumber is not a valid int64 value: %w", err) + var sequencenumber *string + ok := false + if sequencenumber, ok = metadata["sequencenumber"]; !ok { + if sequencenumber, ok = metadata["Sequencenumber"]; !ok { + return Checkpoint{}, fmt.Errorf("sequencenumber on blob not found") } } + if sn, err := strconv.ParseInt(*sequencenumber, 10, 64); err == nil { + checkpoint.SequenceNumber = sn + } else { + return Checkpoint{}, fmt.Errorf("sequencenumber is not a valid int64 value: %w", err) + } return checkpoint, nil } -func readToCheckpointFromBody(get *azblob.DownloadResponse, checkpoint interface{}) error { +func readToCheckpointFromBody(get *azblob.DownloadStreamResponse, checkpoint interface{}) error { blobData := &bytes.Buffer{} - reader := get.Body(azblob.RetryReaderOptions{}) + reader := get.Body if _, err := blobData.ReadFrom(reader); err != nil { return fmt.Errorf("failed to read blob data: %w", err) } @@ -321,13 +288,3 @@ func readToCheckpointFromBody(get *azblob.DownloadResponse, checkpoint interface return nil } - -func downloadBlob(ctx context.Context, baseURL *url.URL, blobCreds azblob.Credential) (*azblob.DownloadResponse, error) { - blobURL := azblob.NewBlockBlobURL(*baseURL, azblob.NewPipeline(blobCreds, azblob.PipelineOptions{})) - - get, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) - if err != nil { - return nil, fmt.Errorf("unable to download file from blob storage: %w", err) - } - return get, nil -} diff --git a/pkg/scalers/azure/azure_eventhub_test.go b/pkg/scalers/azure/azure_eventhub_test.go index 132da68c249..2af8f820719 100644 --- a/pkg/scalers/azure/azure_eventhub_test.go +++ b/pkg/scalers/azure/azure_eventhub_test.go @@ -3,13 +3,15 @@ package azure import ( "bytes" "context" - "errors" "fmt" - "net/url" "strconv" "testing" + "time" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -23,6 +25,7 @@ func TestCheckpointFromBlobStorageAzureFunction(t *testing.T) { return } + ctx := context.Background() partitionID := "0" consumerGroup := "$Default1" @@ -31,16 +34,7 @@ func TestCheckpointFromBlobStorageAzureFunction(t *testing.T) { containerName := "azure-webjobs-eventhub" checkpointFormat := "{\"SequenceNumber\":%d,\"PartitionId\":\"%s\",\"Owner\":\"\",\"Token\":\"\",\"Epoch\":0}" checkpoint := fmt.Sprintf(checkpointFormat, sequencenumber, partitionID) - urlPath := fmt.Sprintf("eventhubnamespace.servicebus.windows.net/hub/%s/", consumerGroup) - - ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, checkpoint, nil) - assert.Equal(t, err, nil) - - expectedCheckpoint := Checkpoint{ 
- PartitionID: partitionID,
- SequenceNumber: sequencenumber,
- }
-
+ urlPath := fmt.Sprintf("eventhubnamespace.servicebus.windows.net/hub/%s/%s", consumerGroup, partitionID)
 eventHubInfo := EventHubInfo{
 EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub",
 StorageConnection: StorageConnectionString,
@@ -48,7 +42,18 @@ func TestCheckpointFromBlobStorageAzureFunction(t *testing.T) {
 EventHubName: "hub",
 }

- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, "0")
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")
+
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, checkpoint, nil)
+ assert.NoError(t, err, "error creating checkpoint")
+
+ expectedCheckpoint := Checkpoint{
+ PartitionID: partitionID,
+ SequenceNumber: sequencenumber,
+ }
+
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, "0")
 assert.Equal(t, check, expectedCheckpoint)
 }

@@ -57,6 +62,7 @@ func TestCheckpointFromBlobStorageDefault(t *testing.T) {
 return
 }

+ ctx := context.Background()
 partitionID := "1"
 consumerGroup := "$Default2"

@@ -65,15 +71,7 @@ func TestCheckpointFromBlobStorageDefault(t *testing.T) {
 containerName := "defaultcontainer"
 checkpointFormat := "{\"SequenceNumber\":%d,\"PartitionId\":\"%s\",\"Owner\":\"\",\"Token\":\"\",\"Epoch\":0}"
 checkpoint := fmt.Sprintf(checkpointFormat, sequencenumber, partitionID)
- urlPath := fmt.Sprintf("%s/", consumerGroup)
-
- ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, checkpoint, nil)
- assert.Equal(t, err, nil)
-
- expectedCheckpoint := Checkpoint{
- PartitionID: partitionID,
- SequenceNumber: sequencenumber,
- }
+ urlPath := fmt.Sprintf("%s/%s", consumerGroup, partitionID)

 eventHubInfo := EventHubInfo{
 EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub",
@@ -82,8 +80,18 @@ func TestCheckpointFromBlobStorageDefault(t *testing.T) {
 EventHubName: "hub",
 BlobContainer: containerName,
 }
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")
+
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, checkpoint, nil)
+ assert.NoError(t, err, "error creating checkpoint")

- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, partitionID)
+ expectedCheckpoint := Checkpoint{
+ PartitionID: partitionID,
+ SequenceNumber: sequencenumber,
+ }
+
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, partitionID)
 assert.Equal(t, check, expectedCheckpoint)
 }

@@ -92,6 +100,7 @@ func TestCheckpointFromBlobStorageDefaultDeprecatedPythonCheckpoint(t *testing.T
 return
 }

+ ctx := context.Background()
 partitionID := "2"
 consumerGroup := "$Default3"

@@ -100,15 +109,7 @@ func TestCheckpointFromBlobStorageDefaultDeprecatedPythonCheckpoint(t *testing.T
 containerName := "defaultcontainerpython"
 checkpointFormat := "{\"sequence_number\":%d,\"partition_id\":\"%s\",\"Owner\":\"\",\"Token\":\"\",\"Epoch\":0}"
 checkpoint := fmt.Sprintf(checkpointFormat, sequencenumber, partitionID)
- urlPath := fmt.Sprintf("%s/", consumerGroup)
-
- ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, checkpoint, nil)
- assert.Equal(t, err, nil)
-
- expectedCheckpoint := Checkpoint{
- PartitionID: partitionID,
- SequenceNumber: sequencenumber,
- }
+ urlPath := fmt.Sprintf("%s/%s", consumerGroup, partitionID)

 eventHubInfo := EventHubInfo{
 EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub",
@@ -118,7 +119,18 @@ func TestCheckpointFromBlobStorageDefaultDeprecatedPythonCheckpoint(t *testing.T
 BlobContainer: containerName,
 }

- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, partitionID)
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")
+
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, checkpoint, nil)
+ assert.NoError(t, err, "error creating checkpoint")
+
+ expectedCheckpoint := Checkpoint{
+ PartitionID: partitionID,
+ SequenceNumber: sequencenumber,
+ }
+
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, partitionID)
 assert.Equal(t, check, expectedCheckpoint)
 }

@@ -127,25 +139,18 @@ func TestCheckpointFromBlobStorageWithBlobMetadata(t *testing.T) {
 return
 }

+ ctx := context.Background()
 partitionID := "4"
 consumerGroup := "$default"

 sequencenumber := int64(1)
-
- metadata := map[string]string{
- "sequencenumber": strconv.FormatInt(sequencenumber, 10),
+ sequencenumberString := strconv.FormatInt(sequencenumber, 10)
+ metadata := map[string]*string{
+ "sequencenumber": &sequencenumberString,
 }

 containerName := "blobmetadatacontainer"
- urlPath := fmt.Sprintf("eventhubnamespace.servicebus.windows.net/hub/%s/checkpoint/", consumerGroup)
-
- ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, "", metadata)
- assert.Equal(t, err, nil)
-
- expectedCheckpoint := Checkpoint{
- PartitionID: partitionID,
- SequenceNumber: sequencenumber,
- }
+ urlPath := fmt.Sprintf("eventhubnamespace.servicebus.windows.net/hub/%s/checkpoint/%s", consumerGroup, partitionID)

 eventHubInfo := EventHubInfo{
 EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub",
@@ -156,7 +161,18 @@ func TestCheckpointFromBlobStorageWithBlobMetadata(t *testing.T) {
 CheckpointStrategy: "blobMetadata",
 }

- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, partitionID)
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")
+
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, "", metadata)
+ assert.NoError(t, err, "error creating checkpoint")
+
+ expectedCheckpoint := Checkpoint{
+ PartitionID: partitionID,
+ SequenceNumber: sequencenumber,
+ }
+
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, partitionID)
 assert.Equal(t, check, expectedCheckpoint)
 }

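The migrated tests above all exercise the same track-2 flow: build a single *azblob.Client, ensure the container exists, upload a checkpoint blob, and stream it back. A minimal self-contained sketch of that flow, separate from this patch; the connection string, container, and blob names are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	ctx := context.Background()
	// Assumed connection string; Azurite's well-known development string works locally.
	connStr := "UseDevelopmentStorage=true"

	// A single *azblob.Client replaces the old credential/pipeline/URL plumbing.
	client, err := azblob.NewClientFromConnectionString(connStr, nil)
	if err != nil {
		panic(err)
	}

	// Container creation becomes idempotent once ContainerAlreadyExists is tolerated.
	if _, err := client.CreateContainer(ctx, "checkpoints", nil); err != nil &&
		!bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
		panic(err)
	}

	// Track-2 metadata is map[string]*string, so values must be addressable.
	seq := "1"
	uploadOpts := &blockblob.UploadBufferOptions{Metadata: map[string]*string{"sequencenumber": &seq}}

	// Blobs are addressed as (container, path) pairs instead of resolved URLs.
	if _, err := client.UploadBuffer(ctx, "checkpoints", "$Default/0", []byte(`{"SequenceNumber":1}`), uploadOpts); err != nil {
		panic(err)
	}

	// DownloadStream returns a response whose Body is a plain io.ReadCloser
	// and whose Metadata carries the values set at upload time.
	resp, err := client.DownloadStream(ctx, "checkpoints", "$Default/0", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)

	seqPtr, ok := resp.Metadata["sequencenumber"]
	if !ok {
		seqPtr, ok = resp.Metadata["Sequencenumber"] // the service may title-case metadata keys
	}
	if ok {
		fmt.Println("checkpoint:", string(body), "sequencenumber:", *seqPtr)
	}
}
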
@@ -165,6 +181,7 @@ func TestCheckpointFromBlobStorageGoSdk(t *testing.T) {
 return
 }

+ ctx := context.Background()
 partitionID := "0"

 sequencenumber := int64(1)

 containerName := "gosdkcontainer"
 checkpointFormat := "{\"partitionID\":\"%s\",\"epoch\":0,\"owner\":\"\",\"checkpoint\":{\"sequenceNumber\":%d,\"enqueueTime\":\"\"},\"state\":\"\",\"token\":\"\"}"
 checkpoint := fmt.Sprintf(checkpointFormat, partitionID, sequencenumber)
- urlPath := ""
-
- ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, checkpoint, nil)
- assert.Equal(t, err, nil)
-
- expectedCheckpoint := Checkpoint{
- PartitionID: partitionID,
- SequenceNumber: sequencenumber,
- }
+ urlPath := partitionID

 eventHubInfo := EventHubInfo{
 EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub",
@@ -191,7 +200,18 @@ func TestCheckpointFromBlobStorageGoSdk(t *testing.T) {
 CheckpointStrategy: "goSdk",
 }

- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, partitionID)
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")
+
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, checkpoint, nil)
+ assert.NoError(t, err, "error creating checkpoint")
+
+ expectedCheckpoint := Checkpoint{
+ PartitionID: partitionID,
+ SequenceNumber: sequencenumber,
+ }
+
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, partitionID)
 assert.Equal(t, check, expectedCheckpoint)
 }

@@ -200,35 +220,40 @@ func TestCheckpointFromBlobStorageDapr(t *testing.T) {
 return
 }

+ ctx := context.Background()
 partitionID := "0"
 consumerGroup := "$default"
 eventhubName := "hub"

 sequencenumber := int64(1)

- containerName := fmt.Sprintf("dapr-%s-%s-%s", eventhubName, consumerGroup, partitionID)
+ containerName := "dapr-container"
 checkpointFormat := "{\"partitionID\":\"%s\",\"epoch\":0,\"owner\":\"\",\"checkpoint\":{\"sequenceNumber\":%d,\"enqueueTime\":\"\"},\"state\":\"\",\"token\":\"\"}"
 checkpoint := fmt.Sprintf(checkpointFormat, partitionID, sequencenumber)
- urlPath := ""
+ urlPath := fmt.Sprintf("dapr-%s-%s-%s", eventhubName, consumerGroup, partitionID)
+
+ eventHubInfo := EventHubInfo{
+ EventHubConnection: fmt.Sprintf("Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=%s", eventhubName),
+ StorageConnection: StorageConnectionString,
+ EventHubName: eventhubName,
+ BlobContainer: containerName,
+ EventHubConsumerGroup: consumerGroup,
+ CheckpointStrategy: "dapr",
+ }
+
+ client, err := GetStorageBlobClient(logr.Discard(), eventHubInfo.PodIdentity, eventHubInfo.StorageConnection, eventHubInfo.StorageAccountName, eventHubInfo.BlobStorageEndpoint, 3*time.Second)
+ assert.NoError(t, err, "error creating the blob client")

- ctx, err := createNewCheckpointInStorage(urlPath, containerName, partitionID, checkpoint, nil)
- assert.Equal(t, err, nil)
+ err = createNewCheckpointInStorage(ctx, client, containerName, urlPath, checkpoint, nil)
+ assert.NoError(t, err, "error creating checkpoint")

 expectedCheckpoint := Checkpoint{
 PartitionID: partitionID,
 SequenceNumber: sequencenumber,
 }

- eventHubInfo := EventHubInfo{
- EventHubConnection: fmt.Sprintf("Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=%s", eventhubName),
- StorageConnection: StorageConnectionString,
- EventHubName: eventhubName,
- BlobContainer: containerName,
- CheckpointStrategy: "dapr",
- }
-
- check, _ := GetCheckpointFromBlobStorage(ctx, eventHubInfo, partitionID)
+ check, _ := GetCheckpointFromBlobStorage(ctx, client, eventHubInfo, partitionID)
 assert.Equal(t, check, expectedCheckpoint)
 }

@@ -239,9 +264,10 @@ func TestShouldParseCheckpointForFunction(t *testing.T) {
 }

 cp := newCheckpointer(eventHubInfo, "0")
- url, _ := cp.resolvePath(eventHubInfo)
+ container, path, _ := cp.resolvePath(eventHubInfo)

-
assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") } func TestShouldParseCheckpointForFunctionWithCheckpointStrategy(t *testing.T) { @@ -252,9 +278,10 @@ func TestShouldParseCheckpointForFunctionWithCheckpointStrategy(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") } func TestShouldParseCheckpointForFunctionWithPodIdentity(t *testing.T) { @@ -267,15 +294,17 @@ func TestShouldParseCheckpointForFunctionWithPodIdentity(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") eventHubInfo.PodIdentity = kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzureWorkload} cp = newCheckpointer(eventHubInfo, "0") - url, _ = cp.resolvePath(eventHubInfo) + container, path, _ = cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") } func TestShouldParseCheckpointForFunctionWithCheckpointStrategyAndPodIdentity(t *testing.T) { @@ -289,15 +318,17 @@ func TestShouldParseCheckpointForFunctionWithCheckpointStrategyAndPodIdentity(t } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") eventHubInfo.PodIdentity = kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzureWorkload} cp = newCheckpointer(eventHubInfo, "0") - url, _ = cp.resolvePath(eventHubInfo) + container, path, _ = cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/azure-webjobs-eventhub/eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") + assert.Equal(t, container, "azure-webjobs-eventhub") + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$Default/0") } func TestShouldParseCheckpointForDefault(t *testing.T) { @@ -308,23 +339,25 @@ func TestShouldParseCheckpointForDefault(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/DefaultContainer/$Default/0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "$Default/0") } func TestShouldParseCheckpointForBlobMetadata(t *testing.T) { eventHubInfo := EventHubInfo{ - EventHubConnection: 
"Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub-test", + EventHubConnection: "Endpoint=sb://eventhubnamespace.servicebus.windows.net/;EntityPath=hub-test;", EventHubConsumerGroup: "$Default", BlobContainer: "containername", CheckpointStrategy: "blobMetadata", } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/containername/eventhubnamespace.servicebus.windows.net/hub-test/$default/checkpoint/0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$default/checkpoint/0") } func TestShouldParseCheckpointForBlobMetadataWithError(t *testing.T) { @@ -336,11 +369,8 @@ func TestShouldParseCheckpointForBlobMetadataWithError(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - _, err := cp.resolvePath(eventHubInfo) - - if err == nil { - t.Errorf("Should have return an err on invalid url characters") - } + _, _, err := cp.resolvePath(eventHubInfo) + assert.Error(t, err, "Should have return an err on invalid url characters") } func TestShouldParseCheckpointForBlobMetadataWithPodIdentity(t *testing.T) { @@ -354,9 +384,10 @@ func TestShouldParseCheckpointForBlobMetadataWithPodIdentity(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/containername/eventhubnamespace.servicebus.windows.net/hub-test/$default/checkpoint/0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "eventhubnamespace.servicebus.windows.net/hub-test/$default/checkpoint/0") } func TestShouldParseCheckpointForGoSdk(t *testing.T) { @@ -368,9 +399,10 @@ func TestShouldParseCheckpointForGoSdk(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/containername/0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "0") } func TestShouldParseCheckpointForDapr(t *testing.T) { @@ -382,9 +414,10 @@ func TestShouldParseCheckpointForDapr(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/containername/dapr-hub-test-$default-0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "dapr-hub-test-$default-0") } func TestShouldParseCheckpointForDaprWithPodIdentity(t *testing.T) { @@ -398,43 +431,25 @@ func TestShouldParseCheckpointForDaprWithPodIdentity(t *testing.T) { } cp := newCheckpointer(eventHubInfo, "0") - url, _ := cp.resolvePath(eventHubInfo) + container, path, _ := cp.resolvePath(eventHubInfo) - assert.Equal(t, url.Path, "/containername/dapr-hub-test-$default-0") + assert.Equal(t, container, eventHubInfo.BlobContainer) + assert.Equal(t, path, "dapr-hub-test-$default-0") } -func createNewCheckpointInStorage(urlPath string, containerName string, partitionID string, checkpoint string, metadata map[string]string) (context.Context, error) { - ctx := context.Background() - - credential, endpoint, _ := ParseAzureStorageBlobConnection(ctx, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, StorageConnectionString, "", "") - +func createNewCheckpointInStorage(ctx context.Context, client *azblob.Client, containerName 
string, path string, checkpoint string, metadata map[string]*string) error { // Create container - path, _ := url.Parse(containerName) - url := endpoint.ResolveReference(path) - containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{})) - _, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - - err = errors.Unwrap(err) - if err != nil { - if stErr, ok := err.(azblob.StorageError); ok { - if stErr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists { - return ctx, fmt.Errorf("failed to create container: %w", err) - } - } + _, err := client.CreateContainer(ctx, containerName, nil) + if err != nil && !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) { + return fmt.Errorf("failed to create container: %w", err) } - - blobFolderURL := containerURL.NewBlockBlobURL(urlPath + partitionID) - var b bytes.Buffer b.WriteString(checkpoint) // Upload file - _, err = azblob.UploadBufferToBlockBlob(ctx, b.Bytes(), blobFolderURL, azblob.UploadToBlockBlobOptions{ - BlockSize: 4 * 1024 * 1024, - Metadata: metadata, - Parallelism: 16}) - if err != nil { - return ctx, fmt.Errorf("Err uploading file to blob: %w", err) - } - return ctx, nil + _, err = client.UploadBuffer(ctx, containerName, path, b.Bytes(), &blockblob.UploadBufferOptions{ + BlockSize: 4 * 1024 * 1024, + Metadata: metadata, + }) + return err } diff --git a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go index c4f739c454e..a9428101bd5 100644 --- a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go +++ b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go @@ -4,7 +4,9 @@ import ( "fmt" "net/http" "strings" + "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" az "github.com/Azure/go-autorest/autorest/azure" @@ -22,6 +24,7 @@ var azureManagedPrometheusResourceURLInCloud = map[string]string{ type azureManagedPrometheusHTTPRoundTripper struct { chainedCredential *azidentity.ChainedTokenCredential + token azcore.AccessToken next http.RoundTripper resourceURL string } @@ -35,7 +38,7 @@ func TryAndGetAzureManagedPrometheusHTTPRoundTripper(logger logr.Logger, podIden return nil, fmt.Errorf("trigger metadata cannot be nil") } - chainedCred, err := NewChainedCredential(logger, podIdentity.GetIdentityID(), podIdentity.GetIdentityTenantID(), podIdentity.Provider) + chainedCred, err := NewChainedCredential(logger, podIdentity) if err != nil { return nil, err } @@ -68,13 +71,15 @@ func TryAndGetAzureManagedPrometheusHTTPRoundTripper(logger logr.Logger, podIden // RoundTrip sets authorization header for requests func (rt *azureManagedPrometheusHTTPRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - token, err := rt.chainedCredential.GetToken(req.Context(), policy.TokenRequestOptions{Scopes: []string{rt.resourceURL}}) - - if err != nil { - return nil, err + if rt.token.ExpiresOn.Before(time.Now().Add(time.Second * 60)) { + token, err := rt.chainedCredential.GetToken(req.Context(), policy.TokenRequestOptions{Scopes: []string{rt.resourceURL}}) + if err != nil { + return nil, err + } + rt.token = token } - bearerAccessToken := "Bearer " + token.Token + bearerAccessToken := "Bearer " + rt.token.Token req.Header.Set("Authorization", bearerAccessToken) return rt.next.RoundTrip(req) diff --git a/pkg/scalers/azure/azure_monitor.go 
b/pkg/scalers/azure/azure_monitor.go deleted file mode 100644 index 4554ede572f..00000000000 --- a/pkg/scalers/azure/azure_monitor.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2021 The KEDA Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "context" - "fmt" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" - "github.com/Azure/go-autorest/autorest/azure/auth" - logf "sigs.k8s.io/controller-runtime/pkg/log" - - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" -) - -// Much of the code in this file is taken from the Azure Kubernetes Metrics Adapter -// https://github.com/Azure/azure-k8s-metrics-adapter/tree/master/pkg/azure/externalmetrics - -type azureExternalMetricRequest struct { - MetricName string - MetricNamespace string - SubscriptionID string - ResourceName string - ResourceProviderNamespace string - ResourceType string - Aggregation string - Timespan string - Filter string - ResourceGroup string -} - -// MonitorInfo to create metric request -type MonitorInfo struct { - ResourceURI string - TenantID string - SubscriptionID string - ResourceGroupName string - Name string - Namespace string - Filter string - AggregationInterval string - AggregationType string - ClientID string - ClientPassword string - AzureResourceManagerEndpoint string - ActiveDirectoryEndpoint string -} - -var azureMonitorLog = logf.Log.WithName("azure_monitor_scaler") - -// GetAzureMetricValue returns the value of an Azure Monitor metric, rounded to the nearest int -func GetAzureMetricValue(ctx context.Context, info MonitorInfo, podIdentity kedav1alpha1.AuthPodIdentity) (float64, error) { - client := createMetricsClient(ctx, info, podIdentity) - requestPtr, err := createMetricsRequest(info) - if err != nil { - return -1, err - } - - return executeRequest(ctx, client, requestPtr) -} - -func createMetricsClient(ctx context.Context, info MonitorInfo, podIdentity kedav1alpha1.AuthPodIdentity) insights.MetricsClient { - client := insights.NewMetricsClientWithBaseURI(info.AzureResourceManagerEndpoint, info.SubscriptionID) - var authConfig auth.AuthorizerConfig - switch podIdentity.Provider { - case "", kedav1alpha1.PodIdentityProviderNone: - config := auth.NewClientCredentialsConfig(info.ClientID, info.ClientPassword, info.TenantID) - config.Resource = info.AzureResourceManagerEndpoint - config.AADEndpoint = info.ActiveDirectoryEndpoint - - authConfig = config - case kedav1alpha1.PodIdentityProviderAzureWorkload: - authConfig = NewAzureADWorkloadIdentityConfig(ctx, podIdentity.GetIdentityID(), podIdentity.GetIdentityTenantID(), podIdentity.GetIdentityAuthorityHost(), info.AzureResourceManagerEndpoint) - } - - authorizer, _ := authConfig.Authorizer() - client.Authorizer = authorizer - - return client -} - -func createMetricsRequest(info MonitorInfo) (*azureExternalMetricRequest, error) { - metricRequest := azureExternalMetricRequest{ - MetricName: info.Name, - MetricNamespace: info.Namespace, - SubscriptionID: 
info.SubscriptionID, - Aggregation: info.AggregationType, - Filter: info.Filter, - ResourceGroup: info.ResourceGroupName, - } - - resourceInfo := strings.Split(info.ResourceURI, "/") - metricRequest.ResourceProviderNamespace = resourceInfo[0] - metricRequest.ResourceType = resourceInfo[1] - metricRequest.ResourceName = resourceInfo[2] - - // if no timespan is provided, defaults to 5 minutes - timespan, err := formatTimeSpan(info.AggregationInterval) - if err != nil { - return nil, err - } - - metricRequest.Timespan = timespan - - return &metricRequest, nil -} - -func executeRequest(ctx context.Context, client insights.MetricsClient, request *azureExternalMetricRequest) (float64, error) { - metricResponse, err := getAzureMetric(ctx, client, *request) - if err != nil { - return -1, fmt.Errorf("error getting azure monitor metric %s: %w", request.MetricName, err) - } - - return metricResponse, nil -} - -func getAzureMetric(ctx context.Context, client insights.MetricsClient, azMetricRequest azureExternalMetricRequest) (float64, error) { - err := azMetricRequest.validate() - if err != nil { - return -1, err - } - - metricResourceURI := azMetricRequest.metricResourceURI() - azureMonitorLog.V(2).Info("metric request", "resource uri", metricResourceURI) - - metricResult, err := client.List(ctx, metricResourceURI, - azMetricRequest.Timespan, nil, - azMetricRequest.MetricName, azMetricRequest.Aggregation, nil, - "", azMetricRequest.Filter, "", azMetricRequest.MetricNamespace) - if err != nil { - return -1, err - } - - value, err := extractValue(azMetricRequest, metricResult) - - return value, err -} - -func extractValue(azMetricRequest azureExternalMetricRequest, metricResult insights.Response) (float64, error) { - metricVals := *metricResult.Value - - if len(metricVals) == 0 { - err := fmt.Errorf("got an empty response for metric %s/%s and aggregate type %s", azMetricRequest.ResourceProviderNamespace, azMetricRequest.MetricName, insights.AggregationType(strings.ToTitle(azMetricRequest.Aggregation))) - return -1, err - } - - timeseriesPtr := metricVals[0].Timeseries - if timeseriesPtr == nil || len(*timeseriesPtr) == 0 { - err := fmt.Errorf("got metric result for %s/%s and aggregate type %s without timeseries", azMetricRequest.ResourceProviderNamespace, azMetricRequest.MetricName, insights.AggregationType(strings.ToTitle(azMetricRequest.Aggregation))) - return -1, err - } - - dataPtr := (*timeseriesPtr)[0].Data - if dataPtr == nil || len(*dataPtr) == 0 { - err := fmt.Errorf("got metric result for %s/%s and aggregate type %s without any metric values", azMetricRequest.ResourceProviderNamespace, azMetricRequest.MetricName, insights.AggregationType(strings.ToTitle(azMetricRequest.Aggregation))) - return -1, err - } - - valuePtr, err := verifyAggregationTypeIsSupported(azMetricRequest.Aggregation, *dataPtr) - if err != nil { - return -1, fmt.Errorf("unable to get value for metric %s/%s with aggregation %s. 
No value returned by Azure Monitor", azMetricRequest.ResourceProviderNamespace, azMetricRequest.MetricName, azMetricRequest.Aggregation) - } - - azureMonitorLog.V(2).Info("value extracted from metric request", "metric type", azMetricRequest.Aggregation, "metric value", *valuePtr) - - return *valuePtr, nil -} - -func (amr azureExternalMetricRequest) validate() error { - if amr.MetricName == "" { - return fmt.Errorf("metricName is required") - } - if amr.ResourceGroup == "" { - return fmt.Errorf("resourceGroup is required") - } - if amr.SubscriptionID == "" { - return fmt.Errorf("subscriptionID is required. set a default or pass via label selectors") - } - return nil -} - -func (amr azureExternalMetricRequest) metricResourceURI() string { - return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", - amr.SubscriptionID, - amr.ResourceGroup, - amr.ResourceProviderNamespace, - amr.ResourceType, - amr.ResourceName) -} - -// formatTimeSpan defaults to a 5 minute timespan if the user does not provide one -func formatTimeSpan(timeSpan string) (string, error) { - endtime := time.Now().UTC().Format(time.RFC3339) - starttime := time.Now().Add(-(5 * time.Minute)).UTC().Format(time.RFC3339) - if timeSpan != "" { - aggregationInterval := strings.Split(timeSpan, ":") - hours, herr := strconv.Atoi(aggregationInterval[0]) - minutes, merr := strconv.Atoi(aggregationInterval[1]) - seconds, serr := strconv.Atoi(aggregationInterval[2]) - - if herr != nil || merr != nil || serr != nil { - return "", fmt.Errorf("errors parsing metricAggregationInterval: %v, %v, %w", herr, merr, serr) - } - - starttime = time.Now().Add(-(time.Duration(hours)*time.Hour + time.Duration(minutes)*time.Minute + time.Duration(seconds)*time.Second)).UTC().Format(time.RFC3339) - } - return fmt.Sprintf("%s/%s", starttime, endtime), nil -} - -func verifyAggregationTypeIsSupported(aggregationType string, data []insights.MetricValue) (*float64, error) { - var valuePtr *float64 - switch { - case strings.EqualFold(string(insights.Average), aggregationType) && data[len(data)-1].Average != nil: - valuePtr = data[len(data)-1].Average - case strings.EqualFold(string(insights.Total), aggregationType) && data[len(data)-1].Total != nil: - valuePtr = data[len(data)-1].Total - case strings.EqualFold(string(insights.Maximum), aggregationType) && data[len(data)-1].Maximum != nil: - valuePtr = data[len(data)-1].Maximum - case strings.EqualFold(string(insights.Minimum), aggregationType) && data[len(data)-1].Minimum != nil: - valuePtr = data[len(data)-1].Minimum - case strings.EqualFold(string(insights.Count), aggregationType) && data[len(data)-1].Count != nil: - valuePtr = data[len(data)-1].Count - default: - err := fmt.Errorf("unsupported aggregation type %s", insights.AggregationType(strings.ToTitle(aggregationType))) - return nil, err - } - return valuePtr, nil -} diff --git a/pkg/scalers/azure/azure_monitor_test.go b/pkg/scalers/azure/azure_monitor_test.go deleted file mode 100644 index 35159ac3c00..00000000000 --- a/pkg/scalers/azure/azure_monitor_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package azure - -import ( - "testing" - - "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" -) - -type testExtractAzMonitorTestData struct { - testName string - isError bool - expectedValue float64 - metricRequest azureExternalMetricRequest - metricResult insights.Response -} - -var testExtractAzMonitordata = []testExtractAzMonitorTestData{ - {"nothing returned", true, -1, azureExternalMetricRequest{}, 
insights.Response{Value: &[]insights.Metric{}}}, - {"timeseries null", true, -1, azureExternalMetricRequest{}, insights.Response{Value: &[]insights.Metric{{Timeseries: nil}}}}, - {"timeseries empty", true, -1, azureExternalMetricRequest{}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{}}}}}, - {"data nil", true, -1, azureExternalMetricRequest{}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: nil}}}}}}, - {"data empty", true, -1, azureExternalMetricRequest{}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{}}}}}}}, - {"Total Aggregation requested", false, 40, azureExternalMetricRequest{Aggregation: "Total"}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{{Total: returnFloat64Ptr(40)}}}}}}}}, - {"Average Aggregation requested", false, 41, azureExternalMetricRequest{Aggregation: "Average"}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{{Average: returnFloat64Ptr(41)}}}}}}}}, - {"Maximum Aggregation requested", false, 42, azureExternalMetricRequest{Aggregation: "Maximum"}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{{Maximum: returnFloat64Ptr(42)}}}}}}}}, - {"Minimum Aggregation requested", false, 43, azureExternalMetricRequest{Aggregation: "Minimum"}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{{Minimum: returnFloat64Ptr(43)}}}}}}}}, - {"Count Aggregation requested", false, 44, azureExternalMetricRequest{Aggregation: "Count"}, insights.Response{Value: &[]insights.Metric{{Timeseries: &[]insights.TimeSeriesElement{{Data: &[]insights.MetricValue{{Count: returnFloat64Ptr(44)}}}}}}}}, -} - -func returnFloat64Ptr(x float64) *float64 { - return &x -} - -func TestAzMonitorextractValue(t *testing.T) { - for _, testData := range testExtractAzMonitordata { - value, err := extractValue(testData.metricRequest, testData.metricResult) - if err != nil && !testData.isError { - t.Errorf("Test: %v; Expected success but got error: %v", testData.testName, err) - } - if testData.isError && err == nil { - t.Errorf("Test: %v; Expected error but got success. testData: %v", testData.testName, testData) - } - if err != nil && value != testData.expectedValue { - t.Errorf("Test: %v; Expected value %v but got %v testData: %v", testData.testName, testData.expectedValue, value, testData) - } - } -} diff --git a/pkg/scalers/azure/azure_queue.go b/pkg/scalers/azure/azure_queue.go deleted file mode 100644 index 39ddaa7eacc..00000000000 --- a/pkg/scalers/azure/azure_queue.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2021 The KEDA Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package azure - -import ( - "context" - - "github.com/Azure/azure-storage-queue-go/azqueue" - - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" -) - -// GetAzureQueueLength returns the length of a queue in int, see https://learn.microsoft.com/en-us/azure/storage/queues/storage-dotnet-how-to-use-queues?tabs=dotnet#get-the-queue-length -func GetAzureQueueLength(ctx context.Context, podIdentity kedav1alpha1.AuthPodIdentity, connectionString, queueName, accountName, endpointSuffix string) (int64, error) { - credential, endpoint, err := ParseAzureStorageQueueConnection(ctx, podIdentity, connectionString, accountName, endpointSuffix) - if err != nil { - return -1, err - } - - p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{}) - serviceURL := azqueue.NewServiceURL(*endpoint, p) - queueURL := serviceURL.NewQueueURL(queueName) - - props, err := queueURL.GetProperties(ctx) - if err != nil { - return -1, err - } - - return int64(props.ApproximateMessagesCount()), nil -} diff --git a/pkg/scalers/azure/azure_queue_test.go b/pkg/scalers/azure/azure_queue_test.go deleted file mode 100644 index eb47cfb0c59..00000000000 --- a/pkg/scalers/azure/azure_queue_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package azure - -import ( - "context" - "encoding/base64" - "errors" - "testing" - - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" -) - -func TestGetQueueLength(t *testing.T) { - length, err := GetAzureQueueLength(context.TODO(), kedav1alpha1.AuthPodIdentity{}, "", "queueName", "", "") - if length != -1 { - t.Error("Expected length to be -1, but got", length) - } - - if err == nil { - t.Error("Expected error for empty connection string, but got nil") - } - - if !errors.Is(err, ErrAzureConnectionStringKeyName) { - t.Error("Expected error to contain parsing error message, but got", err.Error()) - } - - length, err = GetAzureQueueLength(context.TODO(), kedav1alpha1.AuthPodIdentity{}, "DefaultEndpointsProtocol=https;AccountName=name;AccountKey=key==;EndpointSuffix=core.windows.net", "queueName", "", "") - - if length != -1 { - t.Error("Expected length to be -1, but got", length) - } - - if err == nil { - t.Error("Expected error for empty connection string, but got nil") - } - - var base64Error base64.CorruptInputError - if !errors.As(err, &base64Error) { - t.Error("Expected error to contain base64 error message, but got", err.Error()) - } -} diff --git a/pkg/scalers/azure/azure_storage.go b/pkg/scalers/azure/azure_storage.go index 3413e75c369..77ba67b32e3 100644 --- a/pkg/scalers/azure/azure_storage.go +++ b/pkg/scalers/azure/azure_storage.go @@ -17,17 +17,18 @@ limitations under the License. 
package azure import ( - "context" "errors" "fmt" - "net/url" - "strings" + "time" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue" az "github.com/Azure/go-autorest/autorest/azure" + "github.com/go-logr/logr" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + kedautil "github.com/kedacore/keda/v2/pkg/util" ) /* ParseAzureStorageConnectionString parses a storage account connection string into (endpointProtocol, accountName, key, endpointSuffix) @@ -49,11 +50,6 @@ const ( FileEndpoint ) -const ( - // Azure storage resource is "https://storage.azure.com/" in all cloud environments - storageResource = "https://storage.azure.com/" -) - var ( // ErrAzureConnectionStringKeyName indicates an error in the connection string AccountKey or AccountName. ErrAzureConnectionStringKeyName = errors.New("can't parse storage connection string. Missing key or name") @@ -86,151 +82,56 @@ func ParseAzureStorageEndpointSuffix(metadata map[string]string, endpointType St return ParseEnvironmentProperty(metadata, DefaultEndpointSuffixKey, envSuffixProvider) } -// ParseAzureStorageQueueConnection parses queue connection string and returns credential and resource url -func ParseAzureStorageQueueConnection(ctx context.Context, podIdentity kedav1alpha1.AuthPodIdentity, connectionString, accountName, endpointSuffix string) (azqueue.Credential, *url.URL, error) { - switch podIdentity.Provider { - case kedav1alpha1.PodIdentityProviderAzureWorkload: - token, endpoint, err := parseAccessTokenAndEndpoint(ctx, accountName, endpointSuffix, podIdentity) - if err != nil { - return nil, nil, err - } - - credential := azqueue.NewTokenCredential(token, nil) - return credential, endpoint, nil - case "", kedav1alpha1.PodIdentityProviderNone: - endpoint, accountName, accountKey, err := parseAzureStorageConnectionString(connectionString, QueueEndpoint) - if err != nil { - return nil, nil, err - } - - if accountName == "" && accountKey == "" { - return azqueue.NewAnonymousCredential(), endpoint, nil - } - - credential, err := azqueue.NewSharedKeyCredential(accountName, accountKey) - if err != nil { - return nil, nil, err - } - - return credential, endpoint, nil - default: - return nil, nil, fmt.Errorf("azure queues doesn't support %s pod identity type", podIdentity.Provider) +// GetStorageBlobClient returns storage blob client +func GetStorageBlobClient(logger logr.Logger, podIdentity kedav1alpha1.AuthPodIdentity, connectionString, accountName, endpointSuffix string, timeout time.Duration) (*azblob.Client, error) { + opts := &azblob.ClientOptions{ + ClientOptions: policy.ClientOptions{ + Transport: kedautil.CreateHTTPClient(timeout, false), + }, } -} -// ParseAzureStorageBlobConnection parses blob connection string and returns credential and resource url -func ParseAzureStorageBlobConnection(ctx context.Context, podIdentity kedav1alpha1.AuthPodIdentity, connectionString, accountName, endpointSuffix string) (azblob.Credential, *url.URL, error) { switch podIdentity.Provider { - case kedav1alpha1.PodIdentityProviderAzureWorkload: - token, endpoint, err := parseAccessTokenAndEndpoint(ctx, accountName, endpointSuffix, podIdentity) - if err != nil { - return nil, nil, err - } - - credential := azblob.NewTokenCredential(token, nil) - return credential, endpoint, nil case "", kedav1alpha1.PodIdentityProviderNone: 
- endpoint, accountName, accountKey, err := parseAzureStorageConnectionString(connectionString, BlobEndpoint)
+ blobClient, err := azblob.NewClientFromConnectionString(connectionString, opts)
 if err != nil {
- return nil, nil, err
- }
-
- if accountName == "" && accountKey == "" {
- return azblob.NewAnonymousCredential(), endpoint, nil
+ return nil, fmt.Errorf("failed to create blob client: %w", err)
 }
-
- credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
- if err != nil {
- return nil, nil, err
+ return blobClient, nil
+ case kedav1alpha1.PodIdentityProviderAzureWorkload:
+ creds, chainedErr := NewChainedCredential(logger, podIdentity)
+ if chainedErr != nil {
+ return nil, chainedErr
 }
-
- return credential, endpoint, nil
- default:
- return nil, nil, fmt.Errorf("azure storage doesn't support %s pod identity type", podIdentity.Provider)
+ srvURL := fmt.Sprintf("https://%s.%s", accountName, endpointSuffix)
+ return azblob.NewClient(srvURL, creds, opts)
 }
-}

-func parseAzureStorageConnectionString(connectionString string, endpointType StorageEndpointType) (*url.URL, string, string, error) {
- parts := strings.Split(connectionString, ";")
-
- getValue := func(pair string) string {
- parts := strings.SplitN(pair, "=", 2)
- if len(parts) == 2 {
- return parts[1]
- }
- return ""
- }
+ return nil, fmt.Errorf("blob storage does not support pod identity %v", podIdentity.Provider)
+}

- var endpointProtocol, name, key, sas, endpointSuffix, endpoint string
- for _, v := range parts {
- switch {
- case strings.HasPrefix(v, "DefaultEndpointsProtocol"):
- endpointProtocol = getValue(v)
- case strings.HasPrefix(v, "AccountName"):
- name = getValue(v)
- case strings.HasPrefix(v, "AccountKey"):
- key = getValue(v)
- case strings.HasPrefix(v, "SharedAccessSignature"):
- sas = getValue(v)
- case strings.HasPrefix(v, "EndpointSuffix"):
- endpointSuffix = getValue(v)
- case endpointType == BlobEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
- endpoint = getValue(v)
- case endpointType == QueueEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
- endpoint = getValue(v)
- case endpointType == TableEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
- endpoint = getValue(v)
- case endpointType == FileEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
- endpoint = getValue(v)
- }
+// GetStorageQueueClient returns a storage queue client
+func GetStorageQueueClient(logger logr.Logger, podIdentity kedav1alpha1.AuthPodIdentity, connectionString, accountName, endpointSuffix, queueName string, timeout time.Duration) (*azqueue.QueueClient, error) {
+ opts := &azqueue.ClientOptions{
+ ClientOptions: policy.ClientOptions{
+ Transport: kedautil.CreateHTTPClient(timeout, false),
+ },
 }

- if sas != "" && endpoint != "" {
- u, err := url.Parse(fmt.Sprintf("%s?%s", endpoint, sas))
+ switch podIdentity.Provider {
+ case "", kedav1alpha1.PodIdentityProviderNone:
+ queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, opts)
 if err != nil {
- return nil, "", "", err
+ return nil, fmt.Errorf("failed to create queue client: %w", err)
 }
- return u, "", "", nil
- }
-
- if name == "" || key == "" {
- return nil, "", "", ErrAzureConnectionStringKeyName
- }
-
- if endpoint != "" {
- u, err := url.Parse(endpoint)
- if err != nil {
- return nil, "", "", err
+ return queueClient, nil
+ case kedav1alpha1.PodIdentityProviderAzureWorkload:
+ creds, chainedErr := NewChainedCredential(logger, podIdentity)
+ if chainedErr != nil {
+ return nil, chainedErr
 }
- return u, name, key, nil
- }
-
- if endpointProtocol == "" || endpointSuffix == "" {
- return nil, "", "", ErrAzureConnectionStringEndpoint
- }
-
- u, err := url.Parse(fmt.Sprintf("%s://%s.%s.%s", endpointProtocol, name, endpointType.Name(), endpointSuffix))
- if err != nil {
- return nil, "", "", err
- }
-
- return u, name, key, nil
-}
-
-func parseAccessTokenAndEndpoint(ctx context.Context, accountName string, endpointSuffix string,
- podIdentity kedav1alpha1.AuthPodIdentity) (string, *url.URL, error) {
- var token AADToken
- var err error
-
- token, err = GetAzureADWorkloadIdentityToken(ctx, podIdentity.GetIdentityID(), podIdentity.GetIdentityTenantID(), podIdentity.GetIdentityAuthorityHost(), storageResource)
- if err != nil {
- return "", nil, err
- }
-
- if accountName == "" {
- return "", nil, fmt.Errorf("accountName is required for podIdentity azure")
+ srvURL := fmt.Sprintf("https://%s.%s/%s", accountName, endpointSuffix, queueName)
+ return azqueue.NewQueueClient(srvURL, creds, opts)
 }

- endpoint, _ := url.Parse(fmt.Sprintf("https://%s.%s", accountName, endpointSuffix))
- return token.AccessToken, endpoint, nil
+ return nil, fmt.Errorf("queue storage does not support pod identity %v", podIdentity.Provider)
 }
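A hedged sketch of how a caller might use the GetStorageQueueClient helper above to read an approximate queue depth, which is the figure the removed GetAzureQueueLength used to return; the connection string and queue name are assumptions, and pod identity is left unset so the connection-string branch is taken:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"

	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
	"github.com/kedacore/keda/v2/pkg/scalers/azure"
)

func main() {
	ctx := context.Background()
	// Assumed connection string; Azurite's development string works locally.
	connStr := "UseDevelopmentStorage=true"

	// Empty AuthPodIdentity selects the connection-string path inside the helper.
	queueClient, err := azure.GetStorageQueueClient(logr.Discard(), kedav1alpha1.AuthPodIdentity{}, connStr, "", "", "orders", 3*time.Second)
	if err != nil {
		panic(err)
	}

	// Track-2 exposes the approximate depth on the queue properties response.
	props, err := queueClient.GetProperties(ctx, nil)
	if err != nil {
		panic(err)
	}
	if props.ApproximateMessagesCount != nil {
		fmt.Println("approximate queue length:", *props.ApproximateMessagesCount)
	}
}
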
diff --git a/pkg/scalers/azure/azure_storage_test.go b/pkg/scalers/azure/azure_storage_test.go
index 4309cd8d6dc..aedc0be9a83 100644
--- a/pkg/scalers/azure/azure_storage_test.go
+++ b/pkg/scalers/azure/azure_storage_test.go
@@ -2,67 +2,6 @@ package azure

 import "testing"

-type parseConnectionStringTestData struct {
- connectionString string
- accountName string
- accountKey string
- endpoint string
- endpointType StorageEndpointType
- isError bool
-}
-
-var parseConnectionStringTestDataset = []parseConnectionStringTestData{
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net", "testing", "key==", "https://testing.queue.core.windows.net", QueueEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net", "testing", "key==", "https://testing.blob.core.windows.net", BlobEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net", "testing", "key==", "https://testing.table.core.windows.net", TableEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net", "testing", "key==", "https://testing.file.core.windows.net", FileEndpoint, false},
- {"AccountName=testingAccountKey=key==", "", "", "", QueueEndpoint, true},
- {"", "", "", "", QueueEndpoint, true},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net;QueueEndpoint=https://queue.net", "testing", "key==", "https://queue.net", QueueEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net;BlobEndpoint=https://blob.net", "testing", "key==", "https://blob.net", BlobEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net;TableEndpoint=https://table.net", "testing", "key==", "https://table.net", TableEndpoint, false},
- {"DefaultEndpointsProtocol=https;AccountName=testing;AccountKey=key==;EndpointSuffix=core.windows.net;FileEndpoint=https://file.net", "testing", "key==", "https://file.net", FileEndpoint, false},
-
{"QueueEndpoint=https://queue.net;SharedAccessSignature=sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d", "", "", "https://queue.net?sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d", QueueEndpoint, false}, - {"BlobEndpoint=https://blob.net;SharedAccessSignature=sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d", "", "", "https://blob.net?sv=2012-02-12&st=2009-02-09&se=2009-02-10&sr=c&sp=r&si=YWJjZGVmZw%3d%3d&sig=dD80ihBh5jfNpymO5Hg1IdiJIEvHcJpCMiCMnN%2fRnbI%3d", BlobEndpoint, false}, -} - -func TestParseStorageConnectionString(t *testing.T) { - for _, testData := range parseConnectionStringTestDataset { - endpoint, accountName, accountKey, err := parseAzureStorageConnectionString(testData.connectionString, testData.endpointType) - - if !testData.isError && err != nil { - t.Error("Expected success but got err", err) - } - - if testData.isError && err == nil { - t.Error("Expected error but got nil") - } - - if accountName != testData.accountName { - t.Error( - "For", testData.connectionString, - "expected accountName=", testData.accountName, - "but got", accountName) - } - - if accountKey != testData.accountKey { - t.Error( - "For", testData.connectionString, - "expected accountKey=", testData.accountKey, - "but got", accountKey) - } - - if err == nil { - if endpoint.String() != testData.endpoint { - t.Error( - "For", testData.connectionString, - "expected endpoint=", testData.endpoint, - "but got", endpoint) - } - } - } -} - type parseAzureStorageEndpointSuffixTestData struct { metadata map[string]string endpointSuffix string diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go index 14c158cd44a..e617012a97b 100644 --- a/pkg/scalers/azure_blob_scaler.go +++ b/pkg/scalers/azure_blob_scaler.go @@ -21,6 +21,7 @@ import ( "fmt" "strconv" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/go-logr/logr" "github.com/gobwas/glob" v2 "k8s.io/api/autoscaling/v2" @@ -44,6 +45,7 @@ type azureBlobScaler struct { metricType v2.MetricTargetType metadata *azure.BlobMetadata podIdentity kedav1alpha1.AuthPodIdentity + blobClient *azblob.Client logger logr.Logger } @@ -61,9 +63,15 @@ func NewAzureBlobScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error parsing azure blob metadata: %w", err) } + blobClient, err := azure.GetStorageBlobClient(logger, podIdentity, meta.Connection, meta.AccountName, meta.EndpointSuffix, config.GlobalHTTPTimeout) + if err != nil { + return nil, fmt.Errorf("error creating azure blob client: %w", err) + } + return &azureBlobScaler{ metricType: metricType, metadata: meta, + blobClient: blobClient, podIdentity: podIdentity, logger: logger, }, nil @@ -73,7 +81,6 @@ func parseAzureBlobMetadata(config *scalersconfig.ScalerConfig, logger logr.Logg meta := azure.BlobMetadata{} meta.TargetBlobCount = defaultTargetBlobCount meta.BlobDelimiter = defaultBlobDelimiter - meta.BlobPrefix = defaultBlobPrefix if val, ok := config.TriggerMetadata[blobCountMetricName]; ok { blobCount, err := strconv.ParseInt(val, 10, 64) @@ -126,7 +133,8 @@ func parseAzureBlobMetadata(config *scalersconfig.ScalerConfig, logger logr.Logg } if val, ok := config.TriggerMetadata["blobPrefix"]; ok && val != "" { - meta.BlobPrefix = val + meta.BlobDelimiter + prefix := val + meta.BlobDelimiter + meta.BlobPrefix = 
&prefix } endpointSuffix, err := azure.ParseAzureStorageEndpointSuffix(config.TriggerMetadata, azure.BlobEndpoint) @@ -186,7 +194,7 @@ func (s *azureBlobScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp func (s *azureBlobScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { bloblen, err := azure.GetAzureBlobListLength( ctx, - s.podIdentity, + s.blobClient, s.metadata, ) diff --git a/pkg/scalers/azure_data_explorer_scaler_test.go b/pkg/scalers/azure_data_explorer_scaler_test.go index 3c2a6b2ee7e..a4038197a3c 100644 --- a/pkg/scalers/azure_data_explorer_scaler_test.go +++ b/pkg/scalers/azure_data_explorer_scaler_test.go @@ -40,13 +40,14 @@ type dataExplorerMetricIdentifier struct { } var ( - aadAppClientID = "eebdbbab-cf74-4791-a5c6-1ef5d90b1fa8" - aadAppSecret = "test_app_secret" - azureTenantID = "8fe57c22-02b1-4b87-8c24-ae21dea4fa6a" - databaseName = "test_database" - dataExplorerQuery = "print 3" - dataExplorerThreshold = "1" - dataExplorerEndpoint = "https://test-keda-e2e.eastus.kusto.windows.net" + aadAppClientID = "eebdbbab-cf74-4791-a5c6-1ef5d90b1fa8" + aadAppSecret = "test_app_secret" + activeDirectoryEndpoint = "activeDirectoryEndpoint" + azureTenantID = "8fe57c22-02b1-4b87-8c24-ae21dea4fa6a" + databaseName = "test_database" + dataExplorerQuery = "print 3" + dataExplorerThreshold = "1" + dataExplorerEndpoint = "https://test-keda-e2e.eastus.kusto.windows.net" ) // Valid auth params with aad application and passwd diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go index 3f12d387f96..de262e38b1e 100644 --- a/pkg/scalers/azure_eventhub_scaler.go +++ b/pkg/scalers/azure_eventhub_scaler.go @@ -24,8 +24,9 @@ import ( "strconv" "strings" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" az "github.com/Azure/go-autorest/autorest/azure" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" @@ -49,10 +50,11 @@ const ( ) type azureEventHubScaler struct { - metricType v2.MetricTargetType - metadata *eventHubMetadata - client *eventhub.Hub - logger logr.Logger + metricType v2.MetricTargetType + metadata *eventHubMetadata + eventHubClient *azeventhubs.ProducerClient + blobStorageClient *azblob.Client + logger logr.Logger } type eventHubMetadata struct { @@ -64,7 +66,7 @@ type eventHubMetadata struct { } // NewAzureEventHubScaler creates a new scaler for eventHub -func NewAzureEventHubScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { +func NewAzureEventHubScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) @@ -77,16 +79,22 @@ func NewAzureEventHubScaler(ctx context.Context, config *scalersconfig.ScalerCon return nil, fmt.Errorf("unable to get eventhub metadata: %w", err) } - hub, err := azure.GetEventHubClient(ctx, parsedMetadata.eventHubInfo) + eventHubClient, err := azure.GetEventHubClient(parsedMetadata.eventHubInfo, logger) + if err != nil { + return nil, fmt.Errorf("unable to get eventhub client: %w", err) + } + + blobStorageClient, err := azure.GetStorageBlobClient(logger, config.PodIdentity, parsedMetadata.eventHubInfo.StorageConnection, 
parsedMetadata.eventHubInfo.StorageAccountName, parsedMetadata.eventHubInfo.BlobStorageEndpoint, config.GlobalHTTPTimeout)
 	if err != nil {
 		return nil, fmt.Errorf("unable to get blob storage client: %w", err)
 	}
 
 	return &azureEventHubScaler{
-		metricType: metricType,
-		metadata:   parsedMetadata,
-		client:     hub,
-		logger:     logger,
+		metricType:        metricType,
+		metadata:          parsedMetadata,
+		eventHubClient:    eventHubClient,
+		blobStorageClient: blobStorageClient,
+		logger:            logger,
 	}, nil
 }
 
@@ -152,17 +160,6 @@ func parseCommonAzureEventHubMetadata(config *scalersconfig.ScalerConfig, meta *
 		meta.eventHubInfo.BlobContainer = val
 	}
 
-	meta.eventHubInfo.EventHubResourceURL = azure.DefaultEventhubResourceURL
-	if val, ok := config.TriggerMetadata["cloud"]; ok {
-		if strings.EqualFold(val, azure.PrivateCloud) {
-			if resourceURL, ok := config.TriggerMetadata["eventHubResourceURL"]; ok {
-				meta.eventHubInfo.EventHubResourceURL = resourceURL
-			} else {
-				return fmt.Errorf("eventHubResourceURL must be provided for %s cloud type", azure.PrivateCloud)
-			}
-		}
-	}
-
 	serviceBusEndpointSuffixProvider := func(env az.Environment) (string, error) {
 		return env.ServiceBusEndpointSuffix, nil
 	}
@@ -172,12 +169,6 @@ func parseCommonAzureEventHubMetadata(config *scalersconfig.ScalerConfig, meta *
 	}
 	meta.eventHubInfo.ServiceBusEndpointSuffix = serviceBusEndpointSuffix
 
-	activeDirectoryEndpoint, err := azure.ParseActiveDirectoryEndpoint(config.TriggerMetadata)
-	if err != nil {
-		return err
-	}
-	meta.eventHubInfo.ActiveDirectoryEndpoint = activeDirectoryEndpoint
-
 	meta.stalePartitionInfoThreshold = defaultStalePartitionInfoThreshold
 	if val, ok := config.TriggerMetadata["stalePartitionInfoThreshold"]; ok {
 		stalePartitionInfoThreshold, err := strconv.ParseInt(val, 10, 64)
@@ -276,21 +267,19 @@ func parseAzureEventHubAuthenticationMetadata(logger logr.Logger, config *scaler
 }
 
 // GetUnprocessedEventCountInPartition gets number of unprocessed events in a given partition
-func (s *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo *eventhub.HubPartitionRuntimeInformation) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
-	// if partitionInfo.LastSequenceNumber = -1, that means event hub partition is empty
-	if partitionInfo == nil || partitionInfo.LastSequenceNumber == -1 {
+func (s *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo azeventhubs.PartitionProperties) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
	// if partitionInfo.LastEnqueuedSequenceNumber = -1, that means event hub partition is empty
+	if partitionInfo.LastEnqueuedSequenceNumber == -1 {
 		return 0, azure.Checkpoint{}, nil
 	}
 
-	checkpoint, err = azure.GetCheckpointFromBlobStorage(ctx, s.metadata.eventHubInfo, partitionInfo.PartitionID)
+	checkpoint, err = azure.GetCheckpointFromBlobStorage(ctx, s.blobStorageClient, s.metadata.eventHubInfo, partitionInfo.PartitionID)
 	if err != nil {
 		// if blob not found return the total partition event count
 		err = errors.Unwrap(err)
-		if stErr, ok := err.(azblob.StorageError); ok {
-			if stErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || stErr.ServiceCode() == azblob.ServiceCodeContainerNotFound {
-				s.logger.V(1).Error(err, fmt.Sprintf("Blob container : %s not found to use checkpoint strategy, getting unprocessed event count without checkpoint", s.metadata.eventHubInfo.BlobContainer))
-				return GetUnprocessedEventCountWithoutCheckpoint(partitionInfo), azure.Checkpoint{}, nil
-			}
+		if bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound) {
+			s.logger.V(1).Error(err, fmt.Sprintf("Blob container : %s not found to use checkpoint strategy, getting unprocessed event count without checkpoint", s.metadata.eventHubInfo.BlobContainer))
+			return GetUnprocessedEventCountWithoutCheckpoint(partitionInfo), azure.Checkpoint{}, nil
+		}
 		return -1, azure.Checkpoint{}, fmt.Errorf("unable to get checkpoint from storage: %w", err)
 	}
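For reviewers unfamiliar with the track 2 storage SDK: the hunk above replaces the old `azblob.StorageError`/`ServiceCode()` type assertion with `bloberror.HasCode`. A minimal, self-contained sketch of that classification pattern; the connection string, container and blob names are placeholders, not values from this patch:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

func main() {
	ctx := context.Background()
	// Placeholder connection string; substitute a real one to run this.
	client, err := azblob.NewClientFromConnectionString("<storage-connection-string>", nil)
	if err != nil {
		panic(err)
	}

	_, err = client.DownloadStream(ctx, "checkpoints", "missing-blob", nil)
	// HasCode unwraps the transport error and compares its storage error
	// code, so no type assertion on a concrete error type is needed.
	if bloberror.HasCode(err, bloberror.BlobNotFound, bloberror.ContainerNotFound) {
		fmt.Println("no checkpoint blob yet; count events without a checkpoint")
	}
}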
@@ -300,11 +289,11 @@ func (s *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Co
 	return unprocessedEventCountInPartition, checkpoint, nil
 }
 
-func calculateUnprocessedEvents(partitionInfo *eventhub.HubPartitionRuntimeInformation, checkpoint azure.Checkpoint, stalePartitionInfoThreshold int64) int64 {
+func calculateUnprocessedEvents(partitionInfo azeventhubs.PartitionProperties, checkpoint azure.Checkpoint, stalePartitionInfoThreshold int64) int64 {
 	unprocessedEventCount := int64(0)
 
-	if partitionInfo.LastSequenceNumber >= checkpoint.SequenceNumber {
-		unprocessedEventCount = partitionInfo.LastSequenceNumber - checkpoint.SequenceNumber
+	if partitionInfo.LastEnqueuedSequenceNumber >= checkpoint.SequenceNumber {
+		unprocessedEventCount = partitionInfo.LastEnqueuedSequenceNumber - checkpoint.SequenceNumber
 	} else {
 		// Partition is a circular buffer, so it is possible that
 		// partitionInfo.LastSequenceNumber < blob checkpoint's SequenceNumber
@@ -316,7 +305,7 @@ func calculateUnprocessedEvents(partitionInfo *eventhub.HubPartitionRuntimeInfor
 		// e.g., (9223372036854775807 - 15) + 10 = 9223372036854775802
 
 		// Calculate the unprocessed events
-		unprocessedEventCount = (math.MaxInt64 - checkpoint.SequenceNumber) + partitionInfo.LastSequenceNumber
+		unprocessedEventCount = (math.MaxInt64 - checkpoint.SequenceNumber) + partitionInfo.LastEnqueuedSequenceNumber
 	}
 
 	// If the result is greater than the buffer size - stale partition threshold
@@ -329,10 +318,10 @@ func calculateUnprocessedEvents(partitionInfo *eventhub.HubPartitionRuntimeInfor
 }
 
 // GetUnprocessedEventCountWithoutCheckpoint returns the number of messages in the partition when no checkpoint info is available
-func GetUnprocessedEventCountWithoutCheckpoint(partitionInfo *eventhub.HubPartitionRuntimeInformation) int64 {
+func GetUnprocessedEventCountWithoutCheckpoint(partitionInfo azeventhubs.PartitionProperties) int64 {
	// if both values are 0 then there is exactly one message inside the hub. 
First message after init - if (partitionInfo.BeginningSequenceNumber == 0 && partitionInfo.LastSequenceNumber == 0) || (partitionInfo.BeginningSequenceNumber != partitionInfo.LastSequenceNumber) { - return (partitionInfo.LastSequenceNumber - partitionInfo.BeginningSequenceNumber) + 1 + if (partitionInfo.BeginningSequenceNumber == 0 && partitionInfo.LastEnqueuedSequenceNumber == 0) || (partitionInfo.BeginningSequenceNumber != partitionInfo.LastEnqueuedSequenceNumber) { + return (partitionInfo.LastEnqueuedSequenceNumber - partitionInfo.BeginningSequenceNumber) + 1 } return 0 @@ -360,8 +349,8 @@ func getTotalLagRelatedToPartitionAmount(unprocessedEventsCount int64, partition // Close closes Azure Event Hub Scaler func (s *azureEventHubScaler) Close(ctx context.Context) error { - if s.client != nil { - err := s.client.Close(ctx) + if s.eventHubClient != nil { + err := s.eventHubClient.Close(ctx) if err != nil { s.logger.Error(err, "error closing azure event hub client") return err @@ -374,7 +363,7 @@ func (s *azureEventHubScaler) Close(ctx context.Context) error { // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric func (s *azureEventHubScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { totalUnprocessedEventCount := int64(0) - runtimeInfo, err := s.client.GetRuntimeInformation(ctx) + runtimeInfo, err := s.eventHubClient.GetEventHubProperties(ctx, nil) if err != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("unable to get runtimeInfo for metrics: %w", err) } @@ -383,7 +372,7 @@ func (s *azureEventHubScaler) GetMetricsAndActivity(ctx context.Context, metricN for i := 0; i < len(partitionIDs); i++ { partitionID := partitionIDs[i] - partitionRuntimeInfo, err := s.client.GetPartitionInformation(ctx, partitionID) + partitionRuntimeInfo, err := s.eventHubClient.GetPartitionProperties(ctx, partitionID, nil) if err != nil { return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("unable to get partitionRuntimeInfo for metrics: %w", err) } @@ -398,7 +387,7 @@ func (s *azureEventHubScaler) GetMetricsAndActivity(ctx context.Context, metricN totalUnprocessedEventCount += unprocessedEventCount s.logger.V(1).Info(fmt.Sprintf("Partition ID: %s, Last SequenceNumber: %d, Checkpoint SequenceNumber: %d, Total new events in partition: %d", - partitionRuntimeInfo.PartitionID, partitionRuntimeInfo.LastSequenceNumber, checkpoint.SequenceNumber, unprocessedEventCount)) + partitionRuntimeInfo.PartitionID, partitionRuntimeInfo.LastEnqueuedSequenceNumber, checkpoint.SequenceNumber, unprocessedEventCount)) } // set count to max if the sum is negative (Int64 overflow) to prevent negative metric values diff --git a/pkg/scalers/azure_eventhub_scaler_test.go b/pkg/scalers/azure_eventhub_scaler_test.go index 0e737eb3941..47e0e3de283 100644 --- a/pkg/scalers/azure_eventhub_scaler_test.go +++ b/pkg/scalers/azure_eventhub_scaler_test.go @@ -3,12 +3,13 @@ package scalers import ( "context" "fmt" - "net/url" "os" "testing" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/go-logr/logr" kedav1alpha1 
"github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -23,7 +24,6 @@ const ( eventHubsConnection = "Endpoint=sb://testEventHubNamespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=testKey;EntityPath=testEventHub" serviceBusEndpointSuffix = "serviceBusEndpointSuffix" storageEndpointSuffix = "storageEndpointSuffix" - activeDirectoryEndpoint = "activeDirectoryEndpoint" eventHubResourceURL = "eventHubResourceURL" testEventHubNamespace = "kedatesteventhub" testEventHubName = "eventhub1" @@ -44,7 +44,7 @@ type eventHubMetricIdentifier struct { } type calculateUnprocessedEventsTestData struct { - partitionInfo *eventhub.HubPartitionRuntimeInformation + partitionInfo azeventhubs.PartitionProperties checkpoint azure.Checkpoint unprocessedEvents int64 } @@ -150,14 +150,9 @@ var parseEventHubMetadataDatasetWithPodIdentity = []parseEventHubMetadataTestDat resolvedEnv: sampleEventHubResolvedEnv, isError: true, }, - // metadata with private cloud missing active directory endpoint and resourceURL - { - metadata: map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace, "cloud": "private", "endpointSuffix": serviceBusEndpointSuffix}, - resolvedEnv: sampleEventHubResolvedEnv, - isError: true}, // metadata with private cloud missing service bus endpoint suffix and resource URL { - metadata: map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace, "cloud": "private", "activeDirectoryEndpoint": activeDirectoryEndpoint}, + metadata: map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace, "cloud": "private"}, resolvedEnv: sampleEventHubResolvedEnv, isError: true}, // metadata with private cloud missing service bus endpoint suffix and active directory endpoint @@ -168,7 +163,7 @@ var parseEventHubMetadataDatasetWithPodIdentity = []parseEventHubMetadataTestDat }, // properly formed metadata with private cloud { - metadata: map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace, "cloud": "private", "endpointSuffix": serviceBusEndpointSuffix, "activeDirectoryEndpoint": activeDirectoryEndpoint, "eventHubResourceURL": eventHubResourceURL}, + metadata: map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace, "cloud": "private", "endpointSuffix": serviceBusEndpointSuffix, "eventHubResourceURL": eventHubResourceURL}, resolvedEnv: sampleEventHubResolvedEnv, isError: false, }, @@ -202,7 +197,7 @@ var parseEventHubMetadataDatasetWithPodIdentity = []parseEventHubMetadataTestDat isError: true}, // properly formed event hub metadata with Pod Identity and no storage connection string, private cloud and storageEndpointSuffix { - metadata: map[string]string{"cloud": "private", "endpointSuffix": serviceBusEndpointSuffix, "activeDirectoryEndpoint": 
activeDirectoryEndpoint, "eventHubResourceURL": eventHubResourceURL, "storageAccountName": "aStorageAccount", "storageEndpointSuffix": storageEndpointSuffix, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace}, + metadata: map[string]string{"cloud": "private", "endpointSuffix": serviceBusEndpointSuffix, "eventHubResourceURL": eventHubResourceURL, "storageAccountName": "aStorageAccount", "storageEndpointSuffix": storageEndpointSuffix, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace}, resolvedEnv: sampleEventHubResolvedEnv, isError: false, }, @@ -211,50 +206,50 @@ var parseEventHubMetadataDatasetWithPodIdentity = []parseEventHubMetadataTestDat var calculateUnprocessedEventsDataset = []calculateUnprocessedEventsTestData{ { checkpoint: azure.NewCheckpoint(5), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 10}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 10}, unprocessedEvents: 5, }, { checkpoint: azure.NewCheckpoint(4611686018427387903), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 4611686018427387905}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 4611686018427387905}, unprocessedEvents: 2, }, { checkpoint: azure.NewCheckpoint(4611686018427387900), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 4611686018427387905}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 4611686018427387905}, unprocessedEvents: 5, }, { checkpoint: azure.NewCheckpoint(4000000000000200000), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 4000000000000000000}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 4000000000000000000}, unprocessedEvents: 9223372036854575807, }, // Empty checkpoint { checkpoint: azure.NewCheckpoint(0), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 1}, - unprocessedEvents: 1, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 2}, + unprocessedEvents: 2, }, // Stale PartitionInfo { checkpoint: azure.NewCheckpoint(15), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 10}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 10}, unprocessedEvents: 0, }, { checkpoint: azure.NewCheckpoint(4611686018427387910), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 4611686018427387905}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 4611686018427387905}, unprocessedEvents: 0, }, { checkpoint: azure.NewCheckpoint(5), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 9223372036854775797}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 9223372036854775797}, unprocessedEvents: 0, }, // Circular buffer reset { checkpoint: azure.NewCheckpoint(9223372036854775797), - partitionInfo: &eventhub.HubPartitionRuntimeInformation{LastSequenceNumber: 5}, + partitionInfo: azeventhubs.PartitionProperties{LastEnqueuedSequenceNumber: 5}, unprocessedEvents: 15, }, } @@ -310,15 +305,14 @@ func TestGetUnprocessedEventCountInPartition(t *testing.T) { if eventHubKey != "" && storageConnectionString != "" { eventHubConnectionString := 
fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
 
-		storageCredentials, endpoint, err := azure.ParseAzureStorageBlobConnection(ctx, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, storageConnectionString, "", "")
+		t.Log("Creating event hub client...")
+		eventHubProducer, err := azeventhubs.NewProducerClientFromConnectionString(eventHubConnectionString, "", nil)
 		if err != nil {
-			t.Error(err)
-			t.FailNow()
+			t.Fatalf("Expected to create event hub client but got error: %s", err)
 		}
 
 		t.Log("Creating blob storage client...")
-		hubOption := eventhub.HubWithPartitionedSender("0")
-		client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
+		blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil)
 		if err != nil {
 			t.Fatalf("Expected to create blob storage client but got error: %s", err)
 		}
@@ -333,30 +327,30 @@ func TestGetUnprocessedEventCountInPartition(t *testing.T) {
 
 	// Can actually test that numbers return
 	testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
-	testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
-	testEventHubScaler.client = client
+	testEventHubScaler.eventHubClient = eventHubProducer
+	testEventHubScaler.blobStorageClient = blobClient
 	testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
 
 	// Send 1 message to event hub first
 	t.Log("Sending message to event hub")
-	err = SendMessageToEventHub(client)
+	err = SendMessageToEventHub(eventHubProducer)
 	if err != nil {
 		t.Error(err)
 	}
 
 	// Create fake checkpoint with path azure-webjobs-eventhub/.servicebus.windows.net//$Default
 	t.Log("Creating container..")
-	ctx, err := CreateNewCheckpointInStorage(endpoint, storageCredentials, client)
+	err = CreateNewCheckpointInStorage(ctx, blobClient, eventHubProducer)
 	if err != nil {
 		t.Errorf("err creating container: %s", err)
 	}
 
-	partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
+	partitionInfo0, err := testEventHubScaler.eventHubClient.GetPartitionProperties(ctx, "0", nil)
 	if err != nil {
 		t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
 	}
 
-	partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
+	partitionInfo1, err := testEventHubScaler.eventHubClient.GetPartitionProperties(ctx, "1", nil)
 	if err != nil {
 		t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
 	}
@@ -380,7 +374,7 @@ func TestGetUnprocessedEventCountInPartition(t *testing.T) {
 
 	// Delete container - this will also delete checkpoint
 	t.Log("Deleting container...")
-	err = DeleteContainerInStorage(ctx, endpoint, storageCredentials)
+	err = DeleteContainerInStorage(ctx, blobClient)
 	if err != nil {
 		t.Error(err)
 	}
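For context, both tests now build their clients straight from connection strings instead of going through pipelines and credentials. A condensed sketch of the producer-client constructor and the partition metadata it exposes; the namespace, key and hub below are invented for illustration:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

func main() {
	ctx := context.Background()
	// Placeholder connection string; EntityPath picks the hub, so the second
	// argument (an explicit event hub name) can stay empty.
	conn := "Endpoint=sb://example.servicebus.windows.net/;SharedAccessKeyName=key;SharedAccessKey=secret;EntityPath=hub"
	producer, err := azeventhubs.NewProducerClientFromConnectionString(conn, "", nil)
	if err != nil {
		panic(err)
	}
	defer producer.Close(ctx)

	// GetPartitionProperties replaces the old GetPartitionInformation call;
	// LastEnqueuedSequenceNumber replaces LastSequenceNumber.
	props, err := producer.GetPartitionProperties(ctx, "0", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(props.BeginningSequenceNumber, props.LastEnqueuedSequenceNumber)
}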
@@ -397,12 +391,17 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
 	if eventHubKey != "" && storageConnectionString != "" {
 		eventHubConnectionString := fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
 		t.Log("Creating event hub client...")
-		hubOption := eventhub.HubWithPartitionedSender("0")
-		client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
+		client, err := azeventhubs.NewProducerClientFromConnectionString(eventHubConnectionString, "", nil)
 		if err != nil {
 			t.Errorf("Expected to create event hub client but got error: %s", err)
 		}
 
+		t.Log("Creating blob storage client...")
+		blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil)
+		if err != nil {
+			t.Fatalf("Expected to create blob storage client but got error: %s", err)
+		}
+
 		if eventHubConnectionString == "" {
 			t.Fatal("Event hub connection string needed for test")
 		}
@@ -413,8 +412,8 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
 
 	// Can actually test that numbers return
 	testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
-	testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
-	testEventHubScaler.client = client
+	testEventHubScaler.eventHubClient = client
+	testEventHubScaler.blobStorageClient = blobClient
 	testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
 
 	// Send 1 message to event hub first
@@ -426,12 +425,12 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
 
 	ctx := context.Background()
 
-	partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
+	partitionInfo0, err := testEventHubScaler.eventHubClient.GetPartitionProperties(ctx, "0", nil)
 	if err != nil {
 		t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
 	}
 
-	partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "1")
+	partitionInfo1, err := testEventHubScaler.eventHubClient.GetPartitionProperties(ctx, "1", nil)
 	if err != nil {
 		t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
 	}
@@ -456,14 +455,14 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
 }
 
 func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T) {
-	// After the first message the lastsequencenumber init to 0
-	partitionInfo := eventhub.HubPartitionRuntimeInformation{
-		PartitionID:             "0",
-		LastSequenceNumber:      0,
-		BeginningSequenceNumber: 0,
+	// After the first message the LastEnqueuedSequenceNumber init to 0
+	partitionInfo := azeventhubs.PartitionProperties{
+		PartitionID:                "0",
+		LastEnqueuedSequenceNumber: 0,
+		BeginningSequenceNumber:    0,
 	}
 
-	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
+	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(partitionInfo)
 
 	if unprocessedEventCountInPartition0 != 1 {
 		t.Errorf("Expected 1 message in partition 0, got %d", unprocessedEventCountInPartition0)
@@ -472,13 +471,13 @@ func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T
 	// An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
-	partitionInfo := eventhub.HubPartitionRuntimeInformation{
-		PartitionID:             "0",
-		LastSequenceNumber:      255,
-		BeginningSequenceNumber: 255,
+	partitionInfo := azeventhubs.PartitionProperties{
+		PartitionID:                "0",
+		LastEnqueuedSequenceNumber: 255,
+		BeginningSequenceNumber:    255,
 	}
 
-	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
+	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(partitionInfo)
 
 	if unprocessedEventCountInPartition0 != 0 {
 		t.Errorf("Expected 0 messages in partition 0, got %d", unprocessedEventCountInPartition0)
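These checkpoint-less tests all exercise the same `(last - beginning) + 1` formula from the scaler. A self-contained restatement of that arithmetic, using the same invented sequence numbers as the tests:

package main

import "fmt"

// countWithoutCheckpoint mirrors GetUnprocessedEventCountWithoutCheckpoint:
// once anything has been enqueued, a partition holds (last - beginning) + 1
// messages; equal, non-zero values mean the partition has drained.
func countWithoutCheckpoint(beginning, last int64) int64 {
	if (beginning == 0 && last == 0) || beginning != last {
		return (last - beginning) + 1
	}
	return 0
}

func main() {
	fmt.Println(countWithoutCheckpoint(0, 0))     // 1: first message after init
	fmt.Println(countWithoutCheckpoint(255, 255)) // 0: empty partition
	fmt.Println(countWithoutCheckpoint(0, 1))     // 2: two messages enqueued
}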
@@ -486,13 +485,13 @@ func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T
 }
 
 func TestGetUnprocessedEventCountWithoutCheckpointReturning2Messages(t *testing.T) {
-	partitionInfo := eventhub.HubPartitionRuntimeInformation{
-		PartitionID:             "0",
-		LastSequenceNumber:      1,
-		BeginningSequenceNumber: 0,
+	partitionInfo := azeventhubs.PartitionProperties{
+		PartitionID:                "0",
+		LastEnqueuedSequenceNumber: 1,
+		BeginningSequenceNumber:    0,
 	}
 
-	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
+	unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(partitionInfo)
 
 	if unprocessedEventCountInPartition0 != 2 {
 		t.Errorf("Expected 2 messages in partition 0, got %d", unprocessedEventCountInPartition0)
 	}
@@ -515,58 +514,51 @@ func TestGetATotalLagOf100For20PartitionsOn100UnprocessedEvents(t *testing.T) {
 	}
 }
 
-func CreateNewCheckpointInStorage(endpoint *url.URL, credential azblob.Credential, client *eventhub.Hub) (context.Context, error) {
+func CreateNewCheckpointInStorage(ctx context.Context, blobClient *azblob.Client, eventHubProducer *azeventhubs.ProducerClient) error {
 	urlPath := fmt.Sprintf("%s.servicebus.windows.net/%s/$Default/", testEventHubNamespace, testEventHubName)
 
 	// Create container
-	ctx := context.Background()
-	path, _ := url.Parse(testContainerName)
-	url := endpoint.ResolveReference(path)
-	containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
-	_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
-	if err != nil {
-		return ctx, fmt.Errorf("failed to create container: %w", err)
+	_, err := blobClient.CreateContainer(ctx, testContainerName, nil)
+	if err != nil && !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
+		return fmt.Errorf("failed to create container: %w", err)
 	}
 
 	// Create directory checkpoints will be in
 	err = os.MkdirAll(urlPath, 0777)
 	if err != nil {
-		return ctx, fmt.Errorf("Unable to create directory: %w", err)
+		return fmt.Errorf("Unable to create directory: %w", err)
 	}
 	defer os.RemoveAll(urlPath)
 
 	file, err := os.Create(fmt.Sprintf("%s/file", urlPath))
 	if err != nil {
-		return ctx, fmt.Errorf("Unable to create folder: %w", err)
+		return fmt.Errorf("Unable to create file: %w", err)
 	}
 	defer file.Close()
 
-	blobFolderURL := containerURL.NewBlockBlobURL(urlPath)
-
 	// Upload file
-	_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFolderURL, azblob.UploadToBlockBlobOptions{
-		BlockSize:   4 * 1024 * 1024,
-		Parallelism: 16})
+	_, err = blobClient.UploadFile(ctx, testContainerName, urlPath, file, &blockblob.UploadFileOptions{
+		BlockSize: 4 * 1024 * 1024,
+	})
 	if err != nil {
-		return ctx, fmt.Errorf("Err uploading file to blob: %w", err)
+		return fmt.Errorf("Err uploading file to blob: %w", err)
 	}
 
 	// Make checkpoint blob files
-	if err := CreatePartitionFile(ctx, urlPath, "0", containerURL, client); err != nil {
-		return ctx, fmt.Errorf("failed to create partitionID 0 file: %w", err)
+	if err := CreatePartitionFile(ctx, urlPath, "0", blobClient, eventHubProducer); err != nil {
+		return fmt.Errorf("failed to create partitionID 0 file: %w", err)
 	}
-	if err := CreatePartitionFile(ctx, urlPath, "1", containerURL, client); err != nil {
-		return ctx, fmt.Errorf("failed to create partitionID 1 file: %w", err)
+	if err := CreatePartitionFile(ctx, urlPath, "1", blobClient, eventHubProducer); err != nil {
+		return fmt.Errorf("failed to create partitionID 1 file: %w", err)
 	}
-
-	return ctx, nil
+	return nil
 }
 
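The send helper further below moves from `eventhub.NewEventFromString` to the batch-based send flow of the new SDK. A minimal sketch of that three-step pattern; the connection string is a placeholder:

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

func main() {
	ctx := context.Background()
	producer, err := azeventhubs.NewProducerClientFromConnectionString("<event-hub-connection-string>", "", nil)
	if err != nil {
		panic(err)
	}
	defer producer.Close(ctx)

	// 1. Open a batch, optionally pinned to one partition.
	partition := "0"
	batch, err := producer.NewEventDataBatch(ctx, &azeventhubs.EventDataBatchOptions{PartitionID: &partition})
	if err != nil {
		panic(err)
	}
	// 2. Add events; AddEventData fails if an event would exceed the batch size limit.
	if err := batch.AddEventData(&azeventhubs.EventData{Body: []byte("1")}, nil); err != nil {
		panic(err)
	}
	// 3. Send the whole batch in one call.
	if err := producer.SendEventDataBatch(ctx, batch, nil); err != nil {
		panic(err)
	}
}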
-func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partitionID string, containerURL azblob.ContainerURL, client *eventhub.Hub) error {
+func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partitionID string, blobClient *azblob.Client, eventHubClient *azeventhubs.ProducerClient) error {
 	// Create folder structure
 	filePath := urlPathToPartition + partitionID
 
-	partitionInfo, err := client.GetPartitionInformation(ctx, partitionID)
+	partitionInfo, err := eventHubClient.GetPartitionProperties(ctx, partitionID, nil)
 	if err != nil {
 		return fmt.Errorf("unable to get partition info: %w", err)
 	}
@@ -577,12 +569,12 @@ func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partiti
 	}
 
 	if partitionID == "0" {
-		_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber-1, partitionID))
+		_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastEnqueuedSequenceNumber-1, partitionID))
 		if err != nil {
 			return fmt.Errorf("unable to write to file: %w", err)
 		}
 	} else {
-		_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber, partitionID))
+		_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastEnqueuedSequenceNumber, partitionID))
 		if err != nil {
 			return fmt.Errorf("unable to write to file: %w", err)
 		}
@@ -595,36 +587,43 @@ func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partiti
 	}
 	defer file.Close()
 
-	blobFileURL := containerURL.NewBlockBlobURL(filePath)
-
 	// Upload file
-	_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFileURL, azblob.UploadToBlockBlobOptions{
-		BlockSize:   4 * 1024 * 1024,
-		Parallelism: 16})
+	_, err = blobClient.UploadFile(ctx, testContainerName, filePath, file, &blockblob.UploadFileOptions{
+		BlockSize: 4 * 1024 * 1024,
+	})
 	if err != nil {
 		return fmt.Errorf("Err uploading file to blob: %w", err)
 	}
 	return nil
 }
 
-func SendMessageToEventHub(client *eventhub.Hub) error {
+func SendMessageToEventHub(client *azeventhubs.ProducerClient) error {
 	ctx := context.Background()
 
-	err := client.Send(ctx, eventhub.NewEventFromString("1"))
+	partition := "0"
+	newBatchOptions := &azeventhubs.EventDataBatchOptions{
+		PartitionID: &partition,
+	}
+	batch, err := client.NewEventDataBatch(ctx, newBatchOptions)
+	if err != nil {
+		return fmt.Errorf("Error creating event batch: %w", err)
+	}
+	err = batch.AddEventData(&azeventhubs.EventData{
+		Body: []byte("hello"),
+	}, nil)
+	if err != nil {
+		return fmt.Errorf("Error adding event to batch: %w", err)
+	}
+	err = client.SendEventDataBatch(ctx, batch, nil)
 	if err != nil {
 		return fmt.Errorf("Error sending msg: %w", err)
 	}
+
 	return nil
 }
 
-func DeleteContainerInStorage(ctx context.Context, endpoint *url.URL, credential azblob.Credential) error {
-	path, _ := url.Parse(testContainerName)
-	url := endpoint.ResolveReference(path)
-	containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
-
-	_, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{
-		ModifiedAccessConditions: azblob.ModifiedAccessConditions{},
-	})
+func DeleteContainerInStorage(ctx context.Context, client *azblob.Client) error {
+	_, err := client.DeleteContainer(ctx, testContainerName, nil)
 	if err != nil {
 		return fmt.Errorf("failed to delete container in blob storage: %w", err)
 	}
@@ -638,8 +637,9 @@ func TestEventHubGetMetricSpecForScaling(t *testing.T) {
 		t.Fatal("Could not parse metadata:", err)
 	}
 	mockEventHubScaler := azureEventHubScaler{
-		metadata: meta,
-		client:   nil,
+		metadata:          meta,
+		eventHubClient:    nil,
+		blobStorageClient: nil,
 	}
 
	metricSpec 
:= mockEventHubScaler.GetMetricSpecForScaling(context.Background()) diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go index 94d12c020bb..71edcdbdba0 100644 --- a/pkg/scalers/azure_log_analytics_scaler.go +++ b/pkg/scalers/azure_log_analytics_scaler.go @@ -17,21 +17,16 @@ limitations under the License. package scalers import ( - "bytes" "context" - "crypto/sha256" - "encoding/base64" - "encoding/json" "fmt" - "io" - "net/http" - "net/url" "strconv" "strings" - "sync" - "time" - "github.com/Azure/azure-amqp-common-go/v4/auth" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" @@ -42,72 +37,27 @@ import ( kedautil "github.com/kedacore/keda/v2/pkg/util" ) -const ( - aadTokenEndpoint = "%s/%s/oauth2/token" - laQueryEndpoint = "%s/v1/workspaces/%s/query" - defaultLogAnalyticsResourceURL = "https://api.loganalytics.io" -) - type azureLogAnalyticsScaler struct { metricType v2.MetricTargetType metadata *azureLogAnalyticsMetadata name string namespace string - httpClient *http.Client + client *azquery.LogsClient logger logr.Logger } type azureLogAnalyticsMetadata struct { - tenantID string - clientID string - clientSecret string - workspaceID string - podIdentity kedav1alpha1.AuthPodIdentity - query string - threshold float64 - activationThreshold float64 - triggerIndex int - logAnalyticsResourceURL string - activeDirectoryEndpoint string - unsafeSsl bool -} - -type tokenData struct { - TokenType string `json:"token_type"` - ExpiresIn int `json:"expires_in,string"` - ExtExpiresIn int `json:"ext_expires_in,string"` - ExpiresOn int64 `json:"expires_on,string"` - NotBefore int64 `json:"not_before,string"` - Resource string `json:"resource"` - AccessToken string `json:"access_token"` - IsWorkloadIdentityToken bool `json:"isWorkloadIdentityToken"` -} - -type metricsData struct { - value float64 - threshold float64 -} - -type queryResult struct { - Tables []struct { - Name string `json:"name"` - Columns []struct { - Name string `json:"name"` - Type string `json:"type"` - } `json:"columns"` - Rows [][]interface{} `json:"rows"` - } `json:"tables"` -} - -var tokenCache = struct { - sync.RWMutex - m map[string]tokenData -}{m: make(map[string]tokenData)} - -var logAnalyticsResourceURLInCloud = map[string]string{ - "AZUREPUBLICCLOUD": "https://api.loganalytics.io", - "AZUREUSGOVERNMENTCLOUD": "https://api.loganalytics.us", - "AZURECHINACLOUD": "https://api.loganalytics.azure.cn", + tenantID string + clientID string + clientSecret string + workspaceID string + podIdentity kedav1alpha1.AuthPodIdentity + query string + threshold float64 + activationThreshold float64 + triggerIndex int + cloud azcloud.Configuration + unsafeSsl bool } // NewAzureLogAnalyticsScaler creates a new Azure Log Analytics Scaler @@ -122,18 +72,49 @@ func NewAzureLogAnalyticsScaler(config *scalersconfig.ScalerConfig) (Scaler, err return nil, fmt.Errorf("failed to initialize Log Analytics scaler. Scaled object: %s. Namespace: %s. 
Inner Error: %w", config.ScalableObjectName, config.ScalableObjectNamespace, err)
 	}
 
-	useSsl := azureLogAnalyticsMetadata.unsafeSsl
+	logger := InitializeLogger(config, "azure_log_analytics_scaler")
+
+	client, err := CreateAzureLogsClient(config, azureLogAnalyticsMetadata, logger)
+	if err != nil {
+		return nil, err
+	}
 
 	return &azureLogAnalyticsScaler{
 		metricType: metricType,
 		metadata:   azureLogAnalyticsMetadata,
 		name:       config.ScalableObjectName,
 		namespace:  config.ScalableObjectNamespace,
-		httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, useSsl),
-		logger:     InitializeLogger(config, "azure_log_analytics_scaler"),
+		client:     client,
+		logger:     logger,
 	}, nil
 }
 
+func CreateAzureLogsClient(config *scalersconfig.ScalerConfig, meta *azureLogAnalyticsMetadata, logger logr.Logger) (*azquery.LogsClient, error) {
+	var creds azcore.TokenCredential
+	var err error
+	switch config.PodIdentity.Provider {
+	case "", kedav1alpha1.PodIdentityProviderNone:
+		creds, err = azidentity.NewClientSecretCredential(meta.tenantID, meta.clientID, meta.clientSecret, nil)
+	case kedav1alpha1.PodIdentityProviderAzureWorkload:
+		creds, err = azure.NewChainedCredential(logger, config.PodIdentity)
+	default:
+		return nil, fmt.Errorf("azure log analytics does not support pod identity provider - %s", config.PodIdentity.Provider)
+	}
+	if err != nil {
+		return nil, err
+	}
+	client, err := azquery.NewLogsClient(creds, &azquery.LogsClientOptions{
+		ClientOptions: policy.ClientOptions{
+			Transport: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.unsafeSsl),
+			Cloud:     meta.cloud,
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
 func parseAzureLogAnalyticsMetadata(config *scalersconfig.ScalerConfig) (*azureLogAnalyticsMetadata, error) {
 	meta := azureLogAnalyticsMetadata{}
 	switch config.PodIdentity.Provider {
@@ -207,27 +188,24 @@ func parseAzureLogAnalyticsMetadata(config *scalersconfig.ScalerConfig) (*azureL
 	}
 	meta.triggerIndex = config.TriggerIndex
 
-	meta.logAnalyticsResourceURL = defaultLogAnalyticsResourceURL
+	meta.cloud = azcloud.AzurePublic
 	if cloud, ok := config.TriggerMetadata["cloud"]; ok {
 		if strings.EqualFold(cloud, azure.PrivateCloud) {
 			if resource, ok := config.TriggerMetadata["logAnalyticsResourceURL"]; ok && resource != "" {
-				meta.logAnalyticsResourceURL = resource
+				meta.cloud.Services[azquery.ServiceNameLogs] = azcloud.ServiceConfiguration{
+					Endpoint: fmt.Sprintf("%s/v1", resource),
+					Audience: resource,
+				}
 			} else {
 				return nil, fmt.Errorf("logAnalyticsResourceURL must be provided for %s cloud type", azure.PrivateCloud)
 			}
-		} else if resource, ok := logAnalyticsResourceURLInCloud[strings.ToUpper(cloud)]; ok {
-			meta.logAnalyticsResourceURL = resource
+		} else if resource, ok := azure.AzureClouds[strings.ToUpper(cloud)]; ok {
+			meta.cloud = resource
 		} else {
 			return nil, fmt.Errorf("there is no cloud environment matching the name %s", cloud)
 		}
 	}
 
-	activeDirectoryEndpoint, err := azure.ParseActiveDirectoryEndpoint(config.TriggerMetadata)
-	if err != nil {
-		return nil, err
-	}
-	meta.activeDirectoryEndpoint = activeDirectoryEndpoint
-
 	// Getting unsafeSsl, observe that we don't check AuthParams for unsafeSsl
 	meta.unsafeSsl = false
 	unsafeSslVal, err := getParameterFromConfig(config, "unsafeSsl", false)
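For reviewers: the private-cloud branch above folds `logAnalyticsResourceURL` into an azcore cloud configuration instead of carrying a bare resource URL. A condensed sketch of that mapping; the resource URL is a placeholder, and building a fresh Configuration (rather than mutating a copy of azcloud.AzurePublic) keeps the shared Services map untouched:

package main

import (
	"fmt"

	azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
	"github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery"
)

func main() {
	resource := "https://api.loganalytics.example" // placeholder private-cloud URL
	cfg := azcloud.Configuration{
		Services: map[azcloud.ServiceName]azcloud.ServiceConfiguration{
			// ServiceNameLogs keys the endpoint the LogsClient will call;
			// Audience feeds the token scope the credential requests.
			azquery.ServiceNameLogs: {
				Endpoint: fmt.Sprintf("%s/v1", resource),
				Audience: resource,
			},
		},
	}
	fmt.Println(cfg.Services[azquery.ServiceNameLogs].Endpoint)
}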
@@ -268,167 +246,59 @@ func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(context.Context) []v2.
 // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureLogAnalyticsScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
-	receivedMetric, err := s.getMetricData(ctx)
+	val, err := s.getMetricData(ctx)
 
 	if err != nil {
 		return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("failed to get metrics. Scaled object: %s. Namespace: %s. Inner Error: %w", s.name, s.namespace, err)
 	}
 
-	metric := GenerateMetricInMili(metricName, receivedMetric.value)
+	metric := GenerateMetricInMili(metricName, val)
 
-	return []external_metrics.ExternalMetricValue{metric}, receivedMetric.value > s.metadata.activationThreshold, nil
+	return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationThreshold, nil
 }
 
 func (s *azureLogAnalyticsScaler) Close(context.Context) error {
-	if s.httpClient != nil {
-		s.httpClient.CloseIdleConnections()
-	}
 	return nil
 }
 
-func (s *azureLogAnalyticsScaler) getMetricData(ctx context.Context) (metricsData, error) {
-	tokenInfo, err := s.getAccessToken(ctx)
+func (s *azureLogAnalyticsScaler) getMetricData(ctx context.Context) (float64, error) {
+	response, err := s.client.QueryWorkspace(ctx, s.metadata.workspaceID, azquery.Body{
+		Query: &s.metadata.query,
+	}, nil)
 	if err != nil {
-		return metricsData{}, err
+		return -1, err
 	}
 
-	metricsInfo, err := s.executeQuery(ctx, s.metadata.query, tokenInfo)
-	if err != nil {
-		return metricsData{}, err
+	// Pre-validation of query result:
+	switch {
+	case len(response.Tables) == 0 || len(response.Tables[0].Columns) == 0 || len(response.Tables[0].Rows) == 0:
+		return -1, fmt.Errorf("error validating Log Analytics request. Details: there are no results after running your query")
+	case len(response.Tables) > 1:
+		return -1, fmt.Errorf("error validating Log Analytics request. Details: too many tables in query result: %d, expected: 1", len(response.Tables))
+	case len(response.Tables[0].Rows) > 1:
+		return -1, fmt.Errorf("error validating Log Analytics request. Details: too many rows in query result: %d, expected: 1", len(response.Tables[0].Rows))
 	}
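For reference, the shape this pre-validation guards: `QueryWorkspace` returns a set of tables, each with columns and rows, and the scaler expects exactly one single-row table. A minimal sketch, assuming a default credential; the workspace ID and KQL query are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery"
)

func main() {
	creds, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	client, err := azquery.NewLogsClient(creds, nil)
	if err != nil {
		panic(err)
	}

	query := "Heartbeat | count" // placeholder KQL that yields one row, one column
	res, err := client.QueryWorkspace(context.Background(), "<workspace-id>",
		azquery.Body{Query: &query}, nil)
	if err != nil {
		panic(err)
	}
	// Tables -> Rows -> cells; the first cell of the single row carries the
	// value the scaler parses to float64.
	fmt.Println(res.Tables[0].Rows[0][0])
}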
 
-	s.logger.V(1).Info("Providing metric value", "metrics value", metricsInfo.value, "scaler name", s.name, "namespace", s.namespace)
-
-	return metricsInfo, nil
-}
-
-func (s *azureLogAnalyticsScaler) getAccessToken(ctx context.Context) (tokenData, error) {
-	// if there is no token yet or it will be expired in less, that 30 secs
-	currentTimeSec := time.Now().Unix()
-	tokenInfo := tokenData{}
-
-	switch s.metadata.podIdentity.Provider {
-	case "", kedav1alpha1.PodIdentityProviderNone:
-		tokenInfo, _ = getTokenFromCache(s.metadata.clientID, s.metadata.clientSecret)
-	case kedav1alpha1.PodIdentityProviderAzureWorkload:
-		tokenInfo, _ = getTokenFromCache(string(s.metadata.podIdentity.Provider), string(s.metadata.podIdentity.Provider))
-	}
-
-	if currentTimeSec+30 > tokenInfo.ExpiresOn {
-		newTokenInfo, err := s.refreshAccessToken(ctx)
-		if err != nil {
-			return tokenData{}, err
+	if len(response.Tables[0].Rows[0]) > 0 {
+		metricDataType := response.Tables[0].Columns[0].Type
+		metricVal := response.Tables[0].Rows[0][0]
+		if metricDataType == nil || metricVal == nil {
+			return -1, fmt.Errorf("error parsing the response: unexpected nil column type or value in the query result")
 		}
-
-		switch s.metadata.podIdentity.Provider {
-		case "", kedav1alpha1.PodIdentityProviderNone:
-			s.logger.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace)
-			_ = setTokenInCache(s.metadata.clientID, s.metadata.clientSecret, newTokenInfo)
-		case kedav1alpha1.PodIdentityProviderAzureWorkload:
-			s.logger.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace)
-			_ = setTokenInCache(string(s.metadata.podIdentity.Provider), string(s.metadata.podIdentity.Provider), newTokenInfo)
-		}
-
-		return newTokenInfo, nil
-	}
-	return tokenInfo, nil
-}
-
-func (s *azureLogAnalyticsScaler) executeQuery(ctx context.Context, query string, tokenInfo tokenData) (metricsData, error) {
-	queryData := queryResult{}
-	var body []byte
-	var statusCode int
-	var err error
-
-	body, statusCode, err = s.executeLogAnalyticsREST(ctx, query, tokenInfo)
-
-	// Handle expired token
-	if statusCode == 403 || (len(body) > 0 && strings.Contains(string(body), "TokenExpired")) {
-		tokenInfo, err = s.refreshAccessToken(ctx)
+		parsedMetricVal, err := parseTableValueToFloat64(metricVal, *metricDataType)
 		if err != nil {
-			return metricsData{}, err
-		}
-
-		switch s.metadata.podIdentity.Provider {
-		case "", kedav1alpha1.PodIdentityProviderNone:
-			s.logger.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace)
-			_ = setTokenInCache(s.metadata.clientID, s.metadata.clientSecret, tokenInfo)
-		case kedav1alpha1.PodIdentityProviderAzureWorkload:
-			s.logger.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace)
-			_ = setTokenInCache(string(s.metadata.podIdentity.Provider), string(s.metadata.podIdentity.Provider), tokenInfo)
-		}
-
-		if err == nil {
-			body, statusCode, err = s.executeLogAnalyticsREST(ctx, query, tokenInfo)
-		} else {
-			return metricsData{}, err
+			return -1, fmt.Errorf("error parsing the response: %w", err)
 		}
+		return parsedMetricVal, nil
 	}
 
-	if statusCode != 200 && statusCode != 0 {
-		return metricsData{}, fmt.Errorf("error processing Log Analytics request. HTTP code %d. 
Inner Error: %v. Body: %s", statusCode, err, string(body)) - } - - if err != nil { - return metricsData{}, err - } - - if len(body) == 0 { - return metricsData{}, fmt.Errorf("error processing Log Analytics request. Details: empty body. HTTP code: %d", statusCode) - } - - err = json.NewDecoder(bytes.NewReader(body)).Decode(&queryData) - if err != nil { - return metricsData{}, fmt.Errorf("error processing Log Analytics request. Details: can't decode response body to JSON from REST API result. HTTP code: %d. Inner Error: %v. Body: %s", statusCode, err, string(body)) - } - - if statusCode == 200 { - metricsInfo := metricsData{} - metricsInfo.threshold = s.metadata.threshold - metricsInfo.value = 0 - - // Pre-validation of query result: - switch { - case len(queryData.Tables) == 0 || len(queryData.Tables[0].Columns) == 0 || len(queryData.Tables[0].Rows) == 0: - return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: there is no results after running your query. HTTP code: %d. Body: %s", statusCode, string(body)) - case len(queryData.Tables) > 1: - return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: too many tables in query result: %d, expected: 1. HTTP code: %d. Body: %s", len(queryData.Tables), statusCode, string(body)) - case len(queryData.Tables[0].Rows) > 1: - return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: too many rows in query result: %d, expected: 1. HTTP code: %d. Body: %s", len(queryData.Tables[0].Rows), statusCode, string(body)) - } - - if len(queryData.Tables[0].Rows[0]) > 0 { - metricDataType := queryData.Tables[0].Columns[0].Type - metricVal := queryData.Tables[0].Rows[0][0] - parsedMetricVal, err := parseTableValueToFloat64(metricVal, metricDataType) - if err != nil { - return metricsData{}, fmt.Errorf("%s. HTTP code: %d. Body: %s", err.Error(), statusCode, string(body)) - } - metricsInfo.value = parsedMetricVal - } - - if len(queryData.Tables[0].Rows[0]) > 1 { - thresholdDataType := queryData.Tables[0].Columns[1].Type - thresholdVal := queryData.Tables[0].Rows[0][1] - parsedThresholdVal, err := parseTableValueToFloat64(thresholdVal, thresholdDataType) - if err != nil { - return metricsData{}, fmt.Errorf("%s. HTTP code: %d. Body: %s", err.Error(), statusCode, string(body)) - } - metricsInfo.threshold = parsedThresholdVal - } else { - metricsInfo.threshold = -1 - } - - return metricsInfo, nil - } - - return metricsData{}, fmt.Errorf("error processing Log Analytics request. Details: unknown error. HTTP code: %d. Body: %s", statusCode, string(body)) + return -1, fmt.Errorf("error parsing the response %w", err) } -func parseTableValueToFloat64(value interface{}, dataType string) (float64, error) { +func parseTableValueToFloat64(value interface{}, dataType azquery.LogsColumnType) (float64, error) { if value != nil { // type can be: real, int, long - if dataType == "real" || dataType == "int" || dataType == "long" { + if dataType == azquery.LogsColumnTypeReal || dataType == azquery.LogsColumnTypeInt || dataType == azquery.LogsColumnTypeLong { convertedValue, isConverted := value.(float64) if !isConverted { return 0, fmt.Errorf("error validating Log Analytics request. Details: cannot convert result to type float64") @@ -442,181 +312,3 @@ func parseTableValueToFloat64(value interface{}, dataType string) (float64, erro } return 0, fmt.Errorf("error validating Log Analytics request. 
Details: value is empty, check your query") } - -func (s *azureLogAnalyticsScaler) refreshAccessToken(ctx context.Context) (tokenData, error) { - tokenInfo, err := s.getAuthorizationToken(ctx) - - if err != nil { - return tokenData{}, err - } - - if tokenInfo.IsWorkloadIdentityToken { - return tokenInfo, nil - } - - // Now, let's check we can use this token. If no, wait until we can use it - currentTimeSec := time.Now().Unix() - if currentTimeSec < tokenInfo.NotBefore { - if currentTimeSec < tokenInfo.NotBefore+10 { - sleepDurationSec := int(tokenInfo.NotBefore - currentTimeSec + 1) - s.logger.V(1).Info("AAD token not ready", "delay (seconds)", sleepDurationSec, "scaler name", s.name, "namespace", s.namespace) - time.Sleep(time.Duration(sleepDurationSec) * time.Second) - } else { - return tokenData{}, fmt.Errorf("error getting access token. Details: AAD token has been received, but start date begins in %d seconds, so current operation will be skipped", tokenInfo.NotBefore-currentTimeSec) - } - } - - return tokenInfo, nil -} - -func (s *azureLogAnalyticsScaler) getAuthorizationToken(ctx context.Context) (tokenData, error) { - var body []byte - var statusCode int - var err error - var tokenInfo tokenData - - switch s.metadata.podIdentity.Provider { - case kedav1alpha1.PodIdentityProviderAzureWorkload: - aadToken, err := azure.GetAzureADWorkloadIdentityToken(ctx, s.metadata.podIdentity.GetIdentityID(), s.metadata.podIdentity.GetIdentityTenantID(), s.metadata.podIdentity.GetIdentityAuthorityHost(), s.metadata.logAnalyticsResourceURL) - if err != nil { - return tokenData{}, nil - } - - expiresOn := aadToken.ExpiresOnTimeObject.Unix() - if err != nil { - return tokenData{}, nil - } - - tokenInfo = tokenData{ - TokenType: string(auth.CBSTokenTypeJWT), - AccessToken: aadToken.AccessToken, - ExpiresOn: expiresOn, - Resource: s.metadata.logAnalyticsResourceURL, - IsWorkloadIdentityToken: true, - } - - return tokenInfo, nil - case "", kedav1alpha1.PodIdentityProviderNone: - body, statusCode, err = s.executeAADApicall(ctx) - } - - if err != nil { - return tokenData{}, fmt.Errorf("error getting access token. HTTP code: %d. Inner Error: %v. Body: %s", statusCode, err, string(body)) - } else if len(body) == 0 { - return tokenData{}, fmt.Errorf("error getting access token. Details: empty body. HTTP code: %d", statusCode) - } - - err = json.NewDecoder(bytes.NewReader(body)).Decode(&tokenInfo) - if err != nil { - return tokenData{}, fmt.Errorf("error getting access token. Details: can't decode response body to JSON after getting access token. HTTP code: %d. Inner Error: %v. Body: %s", statusCode, err, string(body)) - } - - if statusCode == 200 { - return tokenInfo, nil - } - - return tokenData{}, fmt.Errorf("error getting access token. Details: unknown error. HTTP code: %d. Body: %s", statusCode, string(body)) -} - -func (s *azureLogAnalyticsScaler) executeLogAnalyticsREST(ctx context.Context, query string, tokenInfo tokenData) ([]byte, int, error) { - m := map[string]interface{}{"query": query} - - jsonBytes, err := json.Marshal(m) - if err != nil { - return nil, 0, fmt.Errorf("can't construct JSON for request to Log Analytics API. Inner Error: %w", err) - } - - request, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf(laQueryEndpoint, s.metadata.logAnalyticsResourceURL, s.metadata.workspaceID), bytes.NewBuffer(jsonBytes)) // URL-encoded payload - if err != nil { - return nil, 0, fmt.Errorf("can't construct HTTP request to Log Analytics API. 
Inner Error: %w", err) - } - - request.Header.Add("Content-Type", "application/json") - request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenInfo.AccessToken)) - request.Header.Add("Content-Length", fmt.Sprintf("%d", len(jsonBytes))) - - return s.runHTTP(request, "Log Analytics REST api") -} - -func (s *azureLogAnalyticsScaler) executeAADApicall(ctx context.Context) ([]byte, int, error) { - data := url.Values{ - "grant_type": {"client_credentials"}, - "client_id": {s.metadata.clientID}, - "redirect_uri": {"http://"}, - "resource": {s.metadata.logAnalyticsResourceURL}, - "client_secret": {s.metadata.clientSecret}, - } - - request, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf(aadTokenEndpoint, s.metadata.activeDirectoryEndpoint, s.metadata.tenantID), strings.NewReader(data.Encode())) // URL-encoded payload - if err != nil { - return nil, 0, fmt.Errorf("can't construct HTTP request to Azure Active Directory. Inner Error: %w", err) - } - - request.Header.Add("Content-Type", "application/x-www-form-urlencoded") - request.Header.Add("Content-Length", fmt.Sprintf("%d", len(data.Encode()))) - - return s.runHTTP(request, "AAD") -} - -func (s *azureLogAnalyticsScaler) runHTTP(request *http.Request, caller string) ([]byte, int, error) { - request.Header.Add("Cache-Control", "no-cache") - request.Header.Add("User-Agent", "keda/2.0.0") - - resp, err := s.httpClient.Do(request) - if err != nil && resp != nil { - return nil, resp.StatusCode, fmt.Errorf("error calling %s. Inner Error: %w", caller, err) - } else if err != nil { - return nil, 0, fmt.Errorf("error calling %s. Inner Error: %w", caller, err) - } - - defer resp.Body.Close() - s.httpClient.CloseIdleConnections() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, resp.StatusCode, fmt.Errorf("error reading %s response body: Inner Error: %w", caller, err) - } - - return body, resp.StatusCode, nil -} - -func getTokenFromCache(clientID string, clientSecret string) (tokenData, error) { - key, err := getHash(clientID, clientSecret) - if err != nil { - return tokenData{}, fmt.Errorf("error calculating sha1 hash. Inner Error: %w", err) - } - - tokenCache.RLock() - - if val, ok := tokenCache.m[key]; ok && val.AccessToken != "" { - tokenCache.RUnlock() - return val, nil - } - - tokenCache.RUnlock() - return tokenData{}, fmt.Errorf("error getting value from token cache. 
Details: unknown error") -} - -func setTokenInCache(clientID string, clientSecret string, tokenInfo tokenData) error { - key, err := getHash(clientID, clientSecret) - if err != nil { - return err - } - - tokenCache.Lock() - tokenCache.m[key] = tokenInfo - tokenCache.Unlock() - - return nil -} - -func getHash(clientID string, clientSecret string) (string, error) { - sha256Hash := sha256.New() - _, err := fmt.Fprintf(sha256Hash, "%s|%s", clientID, clientSecret) - - if err != nil { - return "", err - } - - return base64.StdEncoding.EncodeToString(sha256Hash.Sum(nil)), nil -} diff --git a/pkg/scalers/azure_log_analytics_scaler_test.go b/pkg/scalers/azure_log_analytics_scaler_test.go index 343a1b2f776..b423599e05d 100644 --- a/pkg/scalers/azure_log_analytics_scaler_test.go +++ b/pkg/scalers/azure_log_analytics_scaler_test.go @@ -18,7 +18,6 @@ package scalers import ( "context" - "net/http" "testing" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -31,7 +30,6 @@ const ( clientSecret = "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs" workspaceID = "074dd9f8-c368-4220-9400-acb6e80fc325" testLogAnalyticsResourceURL = "testLogAnalyticsResourceURL" - testActiveDirectoryEndpoint = "testActiveDirectoryEndpoint" ) type parseLogAnalyticsMetadataTestData struct { @@ -95,11 +93,9 @@ var testLogAnalyticsMetadata = []parseLogAnalyticsMetadataTestData{ // Known Azure Cloud {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "azurePublicCloud"}, false}, // Private Cloud - {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "private", "logAnalyticsResourceURL": testLogAnalyticsResourceURL, "activeDirectoryEndpoint": testActiveDirectoryEndpoint}, false}, - // Private Cloud missing log analytics resource url - {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "private", "activeDirectoryEndpoint": testActiveDirectoryEndpoint}, true}, + {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "private", "logAnalyticsResourceURL": testLogAnalyticsResourceURL}, false}, // Private Cloud missing active directory endpoint - {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "private", "logAnalyticsResourceURL": testLogAnalyticsResourceURL}, true}, + {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": 
"41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "private", "logAnalyticsResourceURL": testLogAnalyticsResourceURL}, false}, // Unsupported cloud {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000", "cloud": "azureGermanCloud"}, true}, } @@ -187,10 +183,10 @@ func TestLogAnalyticsGetMetricSpecForScaling(t *testing.T) { t.Fatal("Could not parse metadata:", err) } mockLogAnalyticsScaler := azureLogAnalyticsScaler{ - metadata: meta, - name: "test-so", - namespace: "test-ns", - httpClient: http.DefaultClient, + metadata: meta, + name: "test-so", + namespace: "test-ns", + client: nil, } metricSpec := mockLogAnalyticsScaler.GetMetricSpecForScaling(context.Background()) diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go index 1b596554726..71050b5063a 100644 --- a/pkg/scalers/azure_monitor_scaler.go +++ b/pkg/scalers/azure_monitor_scaler.go @@ -19,10 +19,16 @@ package scalers import ( "context" "fmt" + "slices" "strconv" "strings" + "time" - az "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + azcloud "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery" "github.com/go-logr/logr" v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" @@ -39,15 +45,44 @@ const ( activationTargetValueName = "activationTargetValue" ) +// monitorInfo to create metric request +type monitorInfo struct { + ResourceURI string + TenantID string + SubscriptionID string + ResourceGroupName string + Name *string + Namespace *string + Filter *string + AggregationInterval string + AggregationType *azquery.AggregationType + ClientID string + ClientPassword string + Cloud azcloud.Configuration +} + +func (m monitorInfo) MetricResourceURI() string { + resourceInfo := strings.Split(m.ResourceURI, "/") + resourceProviderNamespace := resourceInfo[0] + resourceType := resourceInfo[1] + resourceName := resourceInfo[2] + return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", + m.SubscriptionID, + m.ResourceGroupName, + resourceProviderNamespace, + resourceType, + resourceName) +} + type azureMonitorScaler struct { - metricType v2.MetricTargetType - metadata *azureMonitorMetadata - podIdentity kedav1alpha1.AuthPodIdentity - logger logr.Logger + metricType v2.MetricTargetType + metadata *azureMonitorMetadata + logger logr.Logger + client *azquery.MetricsClient } type azureMonitorMetadata struct { - azureMonitorInfo azure.MonitorInfo + azureMonitorInfo monitorInfo targetValue float64 activationTargetValue float64 triggerIndex int @@ -67,17 +102,47 @@ func NewAzureMonitorScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error parsing azure monitor metadata: %w", err) } + client, err := CreateAzureMetricsClient(config, meta, logger) + if err != nil { + return nil, err + } return &azureMonitorScaler{ - metricType: metricType, - metadata: meta, - podIdentity: config.PodIdentity, - logger: logger, + metricType: 
metricType, + metadata: meta, + logger: logger, + client: client, }, nil } +func CreateAzureMetricsClient(config *scalersconfig.ScalerConfig, meta *azureMonitorMetadata, logger logr.Logger) (*azquery.MetricsClient, error) { + var creds azcore.TokenCredential + var err error + switch config.PodIdentity.Provider { + case "", kedav1alpha1.PodIdentityProviderNone: + creds, err = azidentity.NewClientSecretCredential(meta.azureMonitorInfo.TenantID, meta.azureMonitorInfo.ClientID, meta.azureMonitorInfo.ClientPassword, nil) + case kedav1alpha1.PodIdentityProviderAzureWorkload: + creds, err = azure.NewChainedCredential(logger, config.PodIdentity) + default: + return nil, fmt.Errorf("azure monitor does not support pod identity provider - %s", config.PodIdentity.Provider) + } + if err != nil { + return nil, err + } + client, err := azquery.NewMetricsClient(creds, &azquery.MetricsClientOptions{ + ClientOptions: policy.ClientOptions{ + Transport: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false), + Cloud: meta.azureMonitorInfo.Cloud, + }, + }) + if err != nil { + return nil, err + } + return client, nil +} + func parseAzureMonitorMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*azureMonitorMetadata, error) { meta := azureMonitorMetadata{ - azureMonitorInfo: azure.MonitorInfo{}, + azureMonitorInfo: monitorInfo{}, } if val, ok := config.TriggerMetadata[targetValueName]; ok && val != "" { @@ -123,19 +188,24 @@ func parseAzureMonitorMetadata(config *scalersconfig.ScalerConfig, logger logr.L } if val, ok := config.TriggerMetadata[azureMonitorMetricName]; ok && val != "" { - meta.azureMonitorInfo.Name = val + meta.azureMonitorInfo.Name = &val } else { return nil, fmt.Errorf("no metricName given") } if val, ok := config.TriggerMetadata["metricAggregationType"]; ok && val != "" { - meta.azureMonitorInfo.AggregationType = val + aggregationType := azquery.AggregationType(val) + allowedTypes := azquery.PossibleAggregationTypeValues() + if !slices.Contains(allowedTypes, aggregationType) { + return nil, fmt.Errorf("invalid metricAggregationType given") + } + meta.azureMonitorInfo.AggregationType = &aggregationType } else { return nil, fmt.Errorf("no metricAggregationType given") } if val, ok := config.TriggerMetadata["metricFilter"]; ok && val != "" { - meta.azureMonitorInfo.Filter = val + meta.azureMonitorInfo.Filter = &val } if val, ok := config.TriggerMetadata["metricAggregationInterval"]; ok && val != "" { @@ -161,7 +231,7 @@ func parseAzureMonitorMetadata(config *scalersconfig.ScalerConfig, logger logr.L } if val, ok := config.TriggerMetadata["metricNamespace"]; ok { - meta.azureMonitorInfo.Namespace = val + meta.azureMonitorInfo.Namespace = &val } clientID, clientPassword, err := parseAzurePodIdentityParams(config) @@ -171,26 +241,37 @@ func parseAzureMonitorMetadata(config *scalersconfig.ScalerConfig, logger logr.L meta.azureMonitorInfo.ClientID = clientID meta.azureMonitorInfo.ClientPassword = clientPassword - meta.triggerIndex = config.TriggerIndex - - azureResourceManagerEndpointProvider := func(env az.Environment) (string, error) { - return env.ResourceManagerEndpoint, nil - } - azureResourceManagerEndpoint, err := azure.ParseEnvironmentProperty(config.TriggerMetadata, "azureResourceManagerEndpoint", azureResourceManagerEndpointProvider) + cloud, err := parseCloud(config.TriggerMetadata) if err != nil { return nil, err } - meta.azureMonitorInfo.AzureResourceManagerEndpoint = azureResourceManagerEndpoint - - activeDirectoryEndpoint, err := 
azure.ParseActiveDirectoryEndpoint(config.TriggerMetadata)
-	if err != nil {
-		return nil, err
-	}
-	meta.azureMonitorInfo.ActiveDirectoryEndpoint = activeDirectoryEndpoint
+	meta.azureMonitorInfo.Cloud = cloud
+	meta.triggerIndex = config.TriggerIndex
 
 	return &meta, nil
 }
 
+func parseCloud(metadata map[string]string) (azcloud.Configuration, error) {
+	foundCloud := azcloud.AzurePublic
+	if cloud, ok := metadata["cloud"]; ok {
+		if strings.EqualFold(cloud, azure.PrivateCloud) {
+			if resource, ok := metadata["azureResourceManagerEndpoint"]; ok && resource != "" {
+				foundCloud.Services[azquery.ServiceNameMetrics] = azcloud.ServiceConfiguration{
+					Endpoint: resource,
+					Audience: resource,
+				}
+			} else {
+				return azcloud.Configuration{}, fmt.Errorf("azureResourceManagerEndpoint must be provided for %s cloud type", azure.PrivateCloud)
+			}
+		} else if resource, ok := azure.AzureClouds[strings.ToUpper(cloud)]; ok {
+			foundCloud = resource
+		} else {
+			return azcloud.Configuration{}, fmt.Errorf("there is no cloud environment matching the name %s", cloud)
+		}
+	}
+	return foundCloud, nil
+}
+
 // parseAzurePodIdentityParams gets the activeDirectory clientID and password
 func parseAzurePodIdentityParams(config *scalersconfig.ScalerConfig) (clientID string, clientPassword string, err error) {
 	switch config.PodIdentity.Provider {
@@ -225,7 +306,7 @@ func (s *azureMonitorScaler) Close(context.Context) error {
 func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
 	externalMetric := &v2.ExternalMetricSource{
 		Metric: v2.MetricIdentifier{
-			Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-monitor-%s", s.metadata.azureMonitorInfo.Name))),
+			Name: GenerateMetricNameWithIndex(s.metadata.triggerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-monitor-%s", *s.metadata.azureMonitorInfo.Name))),
 		},
 		Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
 	}
@@ -235,9 +316,8 @@ func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2.Metri
 // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureMonitorScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
-	val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity)
+	val, err := s.requestMetric(ctx)
 	if err != nil {
-		s.logger.Error(err, "error getting azure monitor metric")
 		return []external_metrics.ExternalMetricValue{}, false, err
 	}
 
@@ -245,3 +325,96 @@ func (s *azureMonitorScaler) GetMetricsAndActivity(ctx context.Contex
 	metric := GenerateMetricInMili(metricName, float64(val))
 	return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.activationTargetValue, nil
 }
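// Editor's note (illustration only, not part of the original patch): the new
// requestMetric path below replaces azure.GetAzureMetricValue with a direct
// azquery.MetricsClient call against the ARM resource ID built by
// MetricResourceURI. A worked example, assuming hypothetical trigger metadata
// with resourceURI "Microsoft.ContainerService/managedClusters/my-cluster",
// subscriptionId "0123-4567" and resourceGroupName "my-rg":
//
//	uri := monitorInfo{
//		ResourceURI:       "Microsoft.ContainerService/managedClusters/my-cluster",
//		SubscriptionID:    "0123-4567",
//		ResourceGroupName: "my-rg",
//	}.MetricResourceURI()
//	// uri == "/subscriptions/0123-4567/resourceGroups/my-rg/providers/
//	//         Microsoft.ContainerService/managedClusters/my-cluster"
//
// Note that MetricResourceURI indexes the split resourceURI without a length
// check, so anything other than exactly three "/"-separated segments is
// assumed to have been rejected by metadata validation upstream.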
"error getting azure monitor metric") + return -1, err + } + + if response.Value == nil || len(response.Value) == 0 { + err := fmt.Errorf("got an empty response for metric %s/%s and aggregate type %s", "azMetricRequest.ResourceProviderNamespace", "azMetricRequest.MetricName", "azMetricRequest.Aggregation") + return -1, err + } + + timeseriesPtr := response.Value[0].TimeSeries + if len(timeseriesPtr) == 0 { + err := fmt.Errorf("got metric result for %s/%s and aggregate type %s without timeseries", "azMetricRequest.ResourceProviderNamespace", "azMetricRequest.MetricName", "azMetricRequest.Aggregation") + return -1, err + } + + dataPtr := response.Value[0].TimeSeries[0].Data + if len(dataPtr) == 0 { + err := fmt.Errorf("got metric result for %s/%s and aggregate type %s without any metric values", "azMetricRequest.ResourceProviderNamespace", "azMetricRequest.MetricName", "azMetricRequest.Aggregation") + return -1, err + } + + val, err := verifyAggregationTypeIsSupported(*s.metadata.azureMonitorInfo.AggregationType, dataPtr) + if err != nil { + return -1, err + } + return val, nil +} + +// formatTimeSpan defaults to a 5 minute timespan if the user does not provide one +func formatTimeSpan(timeSpan string) (*azquery.TimeInterval, error) { + endtime := time.Now().UTC() + starttime := time.Now().Add(-(5 * time.Minute)).UTC() + if timeSpan != "" { + aggregationInterval := strings.Split(timeSpan, ":") + hours, herr := strconv.Atoi(aggregationInterval[0]) + minutes, merr := strconv.Atoi(aggregationInterval[1]) + seconds, serr := strconv.Atoi(aggregationInterval[2]) + + if herr != nil || merr != nil || serr != nil { + return nil, fmt.Errorf("errors parsing metricAggregationInterval: %v, %v, %w", herr, merr, serr) + } + + starttime = time.Now().Add(-(time.Duration(hours)*time.Hour + time.Duration(minutes)*time.Minute + time.Duration(seconds)*time.Second)).UTC() + } + interval := azquery.NewTimeInterval(starttime, endtime) + return &interval, nil +} + +func verifyAggregationTypeIsSupported(aggregationType azquery.AggregationType, data []*azquery.MetricValue) (float64, error) { + if data == nil { + err := fmt.Errorf("invalid response") + return -1, err + } + var valuePtr *float64 + switch { + case strings.EqualFold(string(azquery.AggregationTypeAverage), string(aggregationType)) && data[len(data)-1].Average != nil: + valuePtr = data[len(data)-1].Average + case strings.EqualFold(string(azquery.AggregationTypeTotal), string(aggregationType)) && data[len(data)-1].Total != nil: + valuePtr = data[len(data)-1].Total + case strings.EqualFold(string(azquery.AggregationTypeMaximum), string(aggregationType)) && data[len(data)-1].Maximum != nil: + valuePtr = data[len(data)-1].Maximum + case strings.EqualFold(string(azquery.AggregationTypeMinimum), string(aggregationType)) && data[len(data)-1].Minimum != nil: + valuePtr = data[len(data)-1].Minimum + case strings.EqualFold(string(azquery.AggregationTypeCount), string(aggregationType)) && data[len(data)-1].Count != nil: + valuePtr = data[len(data)-1].Count + default: + err := fmt.Errorf("unsupported aggregation type %s", aggregationType) + return -1, err + } + return *valuePtr, nil +} diff --git a/pkg/scalers/azure_monitor_scaler_test.go b/pkg/scalers/azure_monitor_scaler_test.go index 1cc26ca5e3c..766edc4b8e8 100644 --- a/pkg/scalers/azure_monitor_scaler_test.go +++ b/pkg/scalers/azure_monitor_scaler_test.go @@ -94,13 +94,7 @@ var testParseAzMonitorMetadata = []parseAzMonitorMetadataTestData{ {map[string]string{"resourceURI": "test/resource/uri", "tenantId": "123", 
"subscriptionId": "456", "resourceGroupName": "test", "metricName": "metric", "metricAggregationInterval": "0:15:0", "metricAggregationType": "Average", "activeDirectoryClientId": "CLIENT_ID", "activeDirectoryClientPasswordFromEnv": "CLIENT_PASSWORD", "targetValue": "5", "metricNamespace": "namespace", "cloud": "azureChinaCloud"}, false, testAzMonitorResolvedEnv, map[string]string{}, ""}, // private cloud {map[string]string{"resourceURI": "test/resource/uri", "tenantId": "123", "subscriptionId": "456", "resourceGroupName": "test", "metricName": "metric", "metricAggregationInterval": "0:15:0", "metricAggregationType": "Average", "activeDirectoryClientId": "CLIENT_ID", "activeDirectoryClientPasswordFromEnv": "CLIENT_PASSWORD", "targetValue": "5", "metricNamespace": "namespace", "cloud": "private", - "azureResourceManagerEndpoint": testAzureResourceManagerEndpoint, "activeDirectoryEndpoint": testActiveDirectoryEndpoint}, false, testAzMonitorResolvedEnv, map[string]string{}, ""}, - // private cloud with missing resource manager endpoint - {map[string]string{"resourceURI": "test/resource/uri", "tenantId": "123", "subscriptionId": "456", "resourceGroupName": "test", "metricName": "metric", "metricAggregationInterval": "0:15:0", "metricAggregationType": "Average", "activeDirectoryClientId": "CLIENT_ID", "activeDirectoryClientPasswordFromEnv": "CLIENT_PASSWORD", "targetValue": "5", "metricNamespace": "namespace", "cloud": "private", - "activeDirectoryEndpoint": testActiveDirectoryEndpoint}, true, testAzMonitorResolvedEnv, map[string]string{}, ""}, - // private cloud with missing active directory endpoint - {map[string]string{"resourceURI": "test/resource/uri", "tenantId": "123", "subscriptionId": "456", "resourceGroupName": "test", "metricName": "metric", "metricAggregationInterval": "0:15:0", "metricAggregationType": "Average", "activeDirectoryClientId": "CLIENT_ID", "activeDirectoryClientPasswordFromEnv": "CLIENT_PASSWORD", "targetValue": "5", "metricNamespace": "namespace", "cloud": "private", - "azureResourceManagerEndpoint": testAzureResourceManagerEndpoint}, true, testAzMonitorResolvedEnv, map[string]string{}, ""}, + "azureResourceManagerEndpoint": testAzureResourceManagerEndpoint}, false, testAzMonitorResolvedEnv, map[string]string{}, ""}, } var azMonitorMetricIdentifiers = []azMonitorMetricIdentifier{ @@ -129,7 +123,7 @@ func TestAzMonitorGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockAzMonitorScaler := azureMonitorScaler{"", meta, kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, logr.Discard()} + mockAzMonitorScaler := azureMonitorScaler{"", meta, logr.Discard(), nil} metricSpec := mockAzMonitorScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/azure_pipelines_scaler.go b/pkg/scalers/azure_pipelines_scaler.go index 980d1d58858..99e6de0dff4 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -194,7 +194,7 @@ func getAuthMethod(logger logr.Logger, config *scalersconfig.ScalerConfig) (stri case "", kedav1alpha1.PodIdentityProviderNone: return "", nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no personalAccessToken given or PodIdentity provider configured") case kedav1alpha1.PodIdentityProviderAzureWorkload: - cred, err := azure.NewChainedCredential(logger, config.PodIdentity.GetIdentityID(), config.PodIdentity.GetIdentityTenantID(), config.PodIdentity.Provider) + cred, err := 
azure.NewChainedCredential(logger, config.PodIdentity)
 		if err != nil {
 			return "", nil, kedav1alpha1.AuthPodIdentity{}, err
 		}
diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go
index 8609d84a88a..3b4fe1d113d 100644
--- a/pkg/scalers/azure_queue_scaler.go
+++ b/pkg/scalers/azure_queue_scaler.go
@@ -19,9 +19,9 @@ package scalers
 import (
 	"context"
 	"fmt"
-	"net/http"
 	"strconv"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/go-logr/logr"
 	v2 "k8s.io/api/autoscaling/v2"
 	"k8s.io/metrics/pkg/apis/external_metrics"
@@ -42,8 +42,7 @@ const (
 type azureQueueScaler struct {
 	metricType  v2.MetricTargetType
 	metadata    *azureQueueMetadata
-	podIdentity kedav1alpha1.AuthPodIdentity
-	httpClient  *http.Client
+	queueClient *azqueue.QueueClient
 	logger      logr.Logger
 }
 
@@ -71,11 +70,15 @@ func NewAzureQueueScaler(config *scalersconfig.ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error parsing azure queue metadata: %w", err)
 	}
 
+	queueClient, err := azure.GetStorageQueueClient(logger, podIdentity, meta.connection, meta.accountName, meta.endpointSuffix, meta.queueName, config.GlobalHTTPTimeout)
+	if err != nil {
+		return nil, fmt.Errorf("error creating azure queue client: %w", err)
+	}
+
 	return &azureQueueScaler{
 		metricType:  metricType,
 		metadata:    meta,
-		podIdentity: podIdentity,
-		httpClient:  kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false),
+		queueClient: queueClient,
 		logger:      logger,
 	}, nil
 }
@@ -153,9 +156,6 @@ func parseAzureQueueMetadata(config *scalersconfig.ScalerConfig, logger logr.Log
 }
 
 func (s *azureQueueScaler) Close(context.Context) error {
-	if s.httpClient != nil {
-		s.httpClient.CloseIdleConnections()
-	}
 	return nil
 }
 
@@ -172,21 +172,12 @@ func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS
 // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
-	queuelen, err := azure.GetAzureQueueLength(
-		ctx,
-		s.podIdentity,
-		s.metadata.connection,
-		s.metadata.queueName,
-		s.metadata.accountName,
-		s.metadata.endpointSuffix,
-	)
-
+	props, err := s.queueClient.GetProperties(ctx, nil)
 	if err != nil {
 		s.logger.Error(err, "error getting queue length")
 		return []external_metrics.ExternalMetricValue{}, false, err
 	}
-
+	queuelen := int64(*props.ApproximateMessagesCount)
 	metric := GenerateMetricInMili(metricName, float64(queuelen))
-
 	return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetQueueLength, nil
 }
diff --git a/pkg/scalers/azure_queue_scaler_test.go b/pkg/scalers/azure_queue_scaler_test.go
index 461332a8df8..a36da33123c 100644
--- a/pkg/scalers/azure_queue_scaler_test.go
+++ b/pkg/scalers/azure_queue_scaler_test.go
@@ -18,7 +18,6 @@ package scalers
 import (
 	"context"
-	"net/http"
 	"testing"
 
 	"github.com/go-logr/logr"
@@ -101,7 +100,7 @@ func TestAzQueueParseMetadata(t *testing.T) {
 func TestAzQueueGetMetricSpecForScaling(t *testing.T) {
 	for _, testData := range azQueueMetricIdentifiers {
-		meta, podIdentity, err := parseAzureQueueMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
+		meta, _, err := parseAzureQueueMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
 			ResolvedEnv: testData.metadataTestData.resolvedEnv,
 			AuthParams:  testData.metadataTestData.authParams,
 			PodIdentity: 
kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, TriggerIndex: testData.triggerIndex}, logr.Discard()) @@ -109,9 +108,7 @@ func TestAzQueueGetMetricSpecForScaling(t *testing.T) { t.Fatal("Could not parse metadata:", err) } mockAzQueueScaler := azureQueueScaler{ - metadata: meta, - podIdentity: podIdentity, - httpClient: http.DefaultClient, + metadata: meta, } metricSpec := mockAzQueueScaler.GetMetricSpecForScaling(context.Background()) diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go index 242e76f1729..fc407e07973 100755 --- a/pkg/scalers/azure_servicebus_scaler.go +++ b/pkg/scalers/azure_servicebus_scaler.go @@ -21,7 +21,9 @@ import ( "fmt" "regexp" "strconv" + "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin" az "github.com/Azure/go-autorest/autorest/azure" "github.com/go-logr/logr" @@ -67,6 +69,7 @@ type azureServiceBusMetadata struct { entityNameRegex *regexp.Regexp operation string triggerIndex int + timeout time.Duration } // NewAzureServiceBusScaler creates a new AzureServiceBusScaler @@ -97,6 +100,7 @@ func parseAzureServiceBusMetadata(config *scalersconfig.ScalerConfig, logger log meta := azureServiceBusMetadata{} meta.entityType = none meta.targetLength = defaultTargetMessageCount + meta.timeout = config.GlobalHTTPTimeout // get target metric value if val, ok := config.TriggerMetadata[messageCountMetricName]; ok { @@ -294,15 +298,21 @@ func (s *azureServiceBusScaler) getServiceBusAdminClient() (*admin.Client, error } var err error var client *admin.Client + opts := &admin.ClientOptions{ + ClientOptions: policy.ClientOptions{ + Transport: kedautil.CreateHTTPClient(s.metadata.timeout, false), + }, + } + switch s.podIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: - client, err = admin.NewClientFromConnectionString(s.metadata.connection, nil) + client, err = admin.NewClientFromConnectionString(s.metadata.connection, opts) case kedav1alpha1.PodIdentityProviderAzureWorkload: - creds, chainedErr := azure.NewChainedCredential(s.logger, s.podIdentity.GetIdentityID(), s.podIdentity.GetIdentityTenantID(), s.podIdentity.Provider) + creds, chainedErr := azure.NewChainedCredential(s.logger, s.podIdentity) if chainedErr != nil { return nil, chainedErr } - client, err = admin.NewClient(s.metadata.fullyQualifiedNamespace, creds, nil) + client, err = admin.NewClient(s.metadata.fullyQualifiedNamespace, creds, opts) default: err = fmt.Errorf("incorrect podIdentity type") } diff --git a/pkg/scaling/resolver/azure_keyvault_handler.go b/pkg/scaling/resolver/azure_keyvault_handler.go index a60d1fce77f..743b6372c43 100644 --- a/pkg/scaling/resolver/azure_keyvault_handler.go +++ b/pkg/scaling/resolver/azure_keyvault_handler.go @@ -19,11 +19,10 @@ package resolver import ( "context" "fmt" - "strings" - "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" - az "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets" "github.com/go-logr/logr" corev1listers "k8s.io/client-go/listers/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -34,7 +33,7 @@ import ( type AzureKeyVaultHandler struct { vault *kedav1alpha1.AzureKeyVault - keyvaultClient *keyvault.BaseClient + keyvaultClient *azsecrets.Client } func 
NewAzureKeyVaultHandler(v *kedav1alpha1.AzureKeyVault) *AzureKeyVaultHandler { @@ -44,31 +43,22 @@ func NewAzureKeyVaultHandler(v *kedav1alpha1.AzureKeyVault) *AzureKeyVaultHandle } func (vh *AzureKeyVaultHandler) Initialize(ctx context.Context, client client.Client, logger logr.Logger, triggerNamespace string, secretsLister corev1listers.SecretLister) error { - keyvaultResourceURL, activeDirectoryEndpoint, err := vh.getPropertiesForCloud() + cred, err := vh.getCredentials(ctx, client, logger, triggerNamespace, secretsLister) if err != nil { return err } - authConfig, err := vh.getAuthConfig(ctx, client, logger, triggerNamespace, keyvaultResourceURL, activeDirectoryEndpoint, secretsLister) + keyvaultClient, err := azsecrets.NewClient(vh.vault.VaultURI, cred, nil) if err != nil { return err } - authorizer, err := authConfig.Authorizer() - if err != nil { - return err - } - - keyvaultClient := keyvault.New() - keyvaultClient.Authorizer = authorizer - - vh.keyvaultClient = &keyvaultClient - + vh.keyvaultClient = keyvaultClient return nil } func (vh *AzureKeyVaultHandler) Read(ctx context.Context, secretName string, version string) (string, error) { - result, err := vh.keyvaultClient.GetSecret(ctx, vh.vault.VaultURI, secretName, version) + result, err := vh.keyvaultClient.GetSecret(ctx, secretName, version, nil) if err != nil { return "", err } @@ -76,33 +66,8 @@ func (vh *AzureKeyVaultHandler) Read(ctx context.Context, secretName string, ver return *result.Value, nil } -func (vh *AzureKeyVaultHandler) getPropertiesForCloud() (string, string, error) { - cloud := vh.vault.Cloud - - if cloud == nil { - return az.PublicCloud.ResourceIdentifiers.KeyVault, az.PublicCloud.ActiveDirectoryEndpoint, nil - } - - if strings.EqualFold(cloud.Type, azure.PrivateCloud) { - if cloud.KeyVaultResourceURL == "" || cloud.ActiveDirectoryEndpoint == "" { - err := fmt.Errorf("properties keyVaultResourceURL and activeDirectoryEndpoint must be provided for cloud %s", - azure.PrivateCloud) - return "", "", err - } - - return cloud.KeyVaultResourceURL, cloud.ActiveDirectoryEndpoint, nil - } - - env, err := az.EnvironmentFromName(cloud.Type) - if err != nil { - return "", "", err - } - - return env.ResourceIdentifiers.KeyVault, env.ActiveDirectoryEndpoint, nil -} - -func (vh *AzureKeyVaultHandler) getAuthConfig(ctx context.Context, client client.Client, logger logr.Logger, - triggerNamespace, keyVaultResourceURL, activeDirectoryEndpoint string, secretsLister corev1listers.SecretLister) (auth.AuthorizerConfig, error) { +func (vh *AzureKeyVaultHandler) getCredentials(ctx context.Context, client client.Client, logger logr.Logger, + triggerNamespace string, secretsLister corev1listers.SecretLister) (azcore.TokenCredential, error) { podIdentity := vh.vault.PodIdentity if podIdentity == nil { podIdentity = &kedav1alpha1.AuthPodIdentity{} @@ -124,14 +89,9 @@ func (vh *AzureKeyVaultHandler) getAuthConfig(ctx context.Context, client client if clientID == "" || tenantID == "" || clientSecret == "" { return nil, missingErr } - - config := auth.NewClientCredentialsConfig(clientID, clientSecret, tenantID) - config.Resource = keyVaultResourceURL - config.AADEndpoint = activeDirectoryEndpoint - - return config, nil + return azidentity.NewClientSecretCredential(tenantID, clientID, clientSecret, nil) case kedav1alpha1.PodIdentityProviderAzureWorkload: - return azure.NewAzureADWorkloadIdentityConfig(ctx, podIdentity.GetIdentityID(), podIdentity.GetIdentityTenantID(), podIdentity.GetIdentityAuthorityHost(), keyVaultResourceURL), nil + 
return azure.NewChainedCredential(logger, *podIdentity) default: return nil, fmt.Errorf("key vault does not support pod identity provider - %s", podIdentity.Provider) } diff --git a/pkg/scaling/resolver/azure_keyvault_handler_test.go b/pkg/scaling/resolver/azure_keyvault_handler_test.go index 4a6740f567d..541a93b84f0 100644 --- a/pkg/scaling/resolver/azure_keyvault_handler_test.go +++ b/pkg/scaling/resolver/azure_keyvault_handler_test.go @@ -17,119 +17,119 @@ limitations under the License. package resolver import ( + "context" "testing" - az "github.com/Azure/go-autorest/autorest/azure" + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) -const ( - testResourceURL = "testResourceURL" - testActiveDirectoryEndpoint = "testActiveDirectoryEndpoint" -) - -type testData struct { - name string - isError bool - vault kedav1alpha1.AzureKeyVault - expectedKVResourceURL string - expectedADEndpoint string +type AzureKeyVaultHandlerTestCase struct { + Name string + Config *kedav1alpha1.AzureKeyVault + TriggerNamespace string + ExpectedError string } -var testDataset = []testData{ - { - name: "known Azure cloud", - isError: false, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: &kedav1alpha1.AzureKeyVaultCloudInfo{ - Type: "azurePublicCloud", +func TestAzureKeyVaultHandlerInitialize(t *testing.T) { + testCases := []AzureKeyVaultHandlerTestCase{ + { + Name: "Invalid Pod identity provider", + Config: &kedav1alpha1.AzureKeyVault{ + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: "xyz", + }, }, + TriggerNamespace: "testNamespace", + ExpectedError: "key vault does not support pod identity provider - xyz", }, - expectedKVResourceURL: az.PublicCloud.ResourceIdentifiers.KeyVault, - expectedADEndpoint: az.PublicCloud.ActiveDirectoryEndpoint, - }, - { - name: "private cloud", - isError: false, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: &kedav1alpha1.AzureKeyVaultCloudInfo{ - Type: "private", - KeyVaultResourceURL: testResourceURL, - ActiveDirectoryEndpoint: testActiveDirectoryEndpoint, + { + Name: "Missing credentials and pod identity provider", + Config: &kedav1alpha1.AzureKeyVault{ + Credentials: nil, + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: "", + }, }, + TriggerNamespace: "testNamespace", + ExpectedError: "clientID, tenantID and clientSecret are expected when not using a pod identity provider", }, - expectedKVResourceURL: testResourceURL, - expectedADEndpoint: testActiveDirectoryEndpoint, - }, - { - name: "nil cloud info", - isError: false, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: nil, - }, - expectedKVResourceURL: az.PublicCloud.ResourceIdentifiers.KeyVault, - expectedADEndpoint: az.PublicCloud.ActiveDirectoryEndpoint, - }, - { - name: "invalid cloud", - isError: true, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: &kedav1alpha1.AzureKeyVaultCloudInfo{ - Type: "invalid cloud", + { + Name: "Empty trigger namespace", + Config: &kedav1alpha1.AzureKeyVault{ + Credentials: &kedav1alpha1.AzureKeyVaultCredentials{ + ClientSecret: &kedav1alpha1.AzureKeyVaultClientSecret{ + ValueFrom: kedav1alpha1.ValueFromSecret{ + SecretKeyRef: kedav1alpha1.SecretKeyRef{ + Name: "testSecretName", + Key: "testSecretKey", + }, + }, + }, + }, + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, }, + TriggerNamespace: "", + ExpectedError: "clientID, tenantID and clientSecret are expected when not using a pod 
identity provider", }, - expectedKVResourceURL: "", - expectedADEndpoint: "", - }, - { - name: "private cloud missing keyvault resource URL", - isError: true, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: &kedav1alpha1.AzureKeyVaultCloudInfo{ - Type: "private", - ActiveDirectoryEndpoint: testActiveDirectoryEndpoint, + { + Name: "Empty credentials secret name", + Config: &kedav1alpha1.AzureKeyVault{ + Credentials: &kedav1alpha1.AzureKeyVaultCredentials{ + ClientSecret: &kedav1alpha1.AzureKeyVaultClientSecret{ + ValueFrom: kedav1alpha1.ValueFromSecret{ + SecretKeyRef: kedav1alpha1.SecretKeyRef{ + Name: "", + Key: "testSecretKey", + }, + }, + }, + }, + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, }, + TriggerNamespace: "testNamespace", + ExpectedError: "clientID, tenantID and clientSecret are expected when not using a pod identity provider", }, - expectedKVResourceURL: "", - expectedADEndpoint: "", - }, - { - name: "private cloud missing active directory endpoint", - isError: true, - vault: kedav1alpha1.AzureKeyVault{ - Cloud: &kedav1alpha1.AzureKeyVaultCloudInfo{ - Type: "private", - KeyVaultResourceURL: testResourceURL, + { + Name: "Empty credentials secret key", + Config: &kedav1alpha1.AzureKeyVault{ + Credentials: &kedav1alpha1.AzureKeyVaultCredentials{ + ClientSecret: &kedav1alpha1.AzureKeyVaultClientSecret{ + ValueFrom: kedav1alpha1.ValueFromSecret{ + SecretKeyRef: kedav1alpha1.SecretKeyRef{ + Name: "testSecretName", + Key: "", + }, + }, + }, + }, + PodIdentity: &kedav1alpha1.AuthPodIdentity{ + Provider: kedav1alpha1.PodIdentityProviderNone, + }, }, + TriggerNamespace: "testNamespace", + ExpectedError: "clientID, tenantID and clientSecret are expected when not using a pod identity provider", }, - expectedKVResourceURL: "", - expectedADEndpoint: "", - }, -} - -func TestGetPropertiesForCloud(t *testing.T) { - for _, testData := range testDataset { - vh := NewAzureKeyVaultHandler(&testData.vault) - - kvResourceURL, adEndpoint, err := vh.getPropertiesForCloud() - - if err != nil && !testData.isError { - t.Fatalf("test %s: expected success but got error - %s", testData.name, err) - } - - if err == nil && testData.isError { - t.Fatalf("test %s: expected error but got success, testData - %+v", testData.name, testData) - } - - if kvResourceURL != testData.expectedKVResourceURL { - t.Errorf("test %s: keyvault resource URl does not match. expected - %s, got - %s", - testData.name, testData.expectedKVResourceURL, kvResourceURL) - } + } - if adEndpoint != testData.expectedADEndpoint { - t.Errorf("test %s: active directory endpoint does not match. 
expected - %s, got - %s", - testData.name, testData.expectedADEndpoint, adEndpoint) - } + for _, testCase := range testCases { + fake.NewClientBuilder() + t.Run(testCase.Name, func(t *testing.T) { + handler := NewAzureKeyVaultHandler(testCase.Config) + err := handler.Initialize(context.TODO(), nil, logf.Log.WithName("test"), "", nil) + if testCase.ExpectedError != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), testCase.ExpectedError) + } else { + assert.NoError(t, err) + } + }) } } diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 7f6071d54f9..ed1895a6b75 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -141,7 +141,7 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, case "azure-data-explorer": return scalers.NewAzureDataExplorerScaler(config) case "azure-eventhub": - return scalers.NewAzureEventHubScaler(ctx, config) + return scalers.NewAzureEventHubScaler(config) case "azure-log-analytics": return scalers.NewAzureLogAnalyticsScaler(config) case "azure-monitor": diff --git a/tests/scalers/azure/azure_blob/azure_blob_test.go b/tests/scalers/azure/azure_blob/azure_blob_test.go index a355dcbe9b1..3792865704b 100644 --- a/tests/scalers/azure/azure_blob/azure_blob_test.go +++ b/tests/scalers/azure/azure_blob/azure_blob_test.go @@ -11,14 +11,12 @@ import ( "strings" "testing" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . "github.com/kedacore/keda/v2/tests/helper" ) @@ -123,10 +121,14 @@ spec: func TestScaler(t *testing.T) { // setup + ctx := context.Background() t.Log("--- setting up ---") require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure blob test") - containerURL := createContainer(t) + blobClient, err := azblob.NewClientFromConnectionString(connectionString, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = blobClient.CreateContainer(ctx, containerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) @@ -138,30 +140,14 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testActivation(t, kc, containerURL) - testScaleOut(t, kc, containerURL) - testScaleIn(t, kc, containerURL) + testActivation(ctx, t, kc, blobClient) + testScaleOut(ctx, t, kc, blobClient) + testScaleIn(ctx, t, kc, blobClient) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - cleanupContainer(t, containerURL) -} - -func createContainer(t *testing.T) azblob.ContainerURL { - // Create Blob Container - credential, endpoint, err := azure.ParseAzureStorageBlobConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - connectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(containerName) - - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer) - assert.NoErrorf(t, 
err, "cannot create blob container - %s", err) - - return containerURL + _, err = blobClient.DeleteContainer(ctx, containerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) } func getTemplateData() (templateData, []Template) { @@ -181,29 +167,25 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing activation ---") - addFiles(t, containerURL, 4) + addFiles(ctx, t, blobClient, 4) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing scale out ---") - addFiles(t, containerURL, 10) + addFiles(ctx, t, blobClient, 10) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 1), "replica count should be 2 after 1 minute") } -func testScaleIn(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing scale in ---") for i := 0; i < 10; i++ { blobName := fmt.Sprintf("blob-%d", i) - blobURL := containerURL.NewBlockBlobURL(blobName) - - _, err := blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, - azblob.BlobAccessConditions{}) - + _, err := blobClient.DeleteBlob(ctx, containerName, blobName, nil) assert.NoErrorf(t, err, "cannot delete blob - %s", err) } @@ -211,23 +193,12 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.Con "replica count should be 0 after 1 minute") } -func addFiles(t *testing.T, containerURL azblob.ContainerURL, count int) { +func addFiles(ctx context.Context, t *testing.T, blobClient *azblob.Client, count int) { data := "Hello World!" 
for i := 0; i < count; i++ {
 		blobName := fmt.Sprintf("blob-%d", i)
-		blobURL := containerURL.NewBlockBlobURL(blobName)
-
-		_, err := blobURL.Upload(context.Background(), strings.NewReader(data),
-			azblob.BlobHTTPHeaders{ContentType: "text/plain"}, azblob.Metadata{}, azblob.BlobAccessConditions{},
-			azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
-
+		_, err := blobClient.UploadStream(ctx, containerName, blobName, strings.NewReader(data), nil)
 		assert.NoErrorf(t, err, "cannot upload blob - %s", err)
 	}
 }
-
-func cleanupContainer(t *testing.T, containerURL azblob.ContainerURL) {
-	t.Log("--- cleaning up ---")
-	_, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{})
-	assert.NoErrorf(t, err, "cannot delete storage container - %s", err)
-}
diff --git a/tests/scalers/azure/azure_blob_aad_wi/azure_blob_aad_wi_test.go b/tests/scalers/azure/azure_blob_aad_wi/azure_blob_aad_wi_test.go
index d9b11a07319..eaac4612c68 100644
--- a/tests/scalers/azure/azure_blob_aad_wi/azure_blob_aad_wi_test.go
+++ b/tests/scalers/azure/azure_blob_aad_wi/azure_blob_aad_wi_test.go
@@ -11,15 +11,14 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
+	azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper"
 )
 
 // Load environment variables from .env file
@@ -138,10 +137,15 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure blob test")
+	accountName = azurehelper.GetAccountFromStorageConnectionString(connectionString)
 
-	containerURL := createContainer(t)
+	blobClient, err := azblob.NewClientFromConnectionString(connectionString, nil)
+	assert.NoErrorf(t, err, "cannot create the blob client - %s", err)
+	_, err = blobClient.CreateContainer(ctx, containerName, nil)
+	assert.NoErrorf(t, err, "cannot create the container - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -153,32 +157,14 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testActivation(t, kc, containerURL)
-	testScaleOut(t, kc, containerURL)
-	testScaleIn(t, kc, containerURL)
+	testActivation(ctx, t, kc, blobClient)
+	testScaleOut(ctx, t, kc, blobClient)
+	testScaleIn(ctx, t, kc, blobClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupContainer(t, containerURL)
-}
-
-func createContainer(t *testing.T) azblob.ContainerURL {
-	// Create Blob Container
-	credential, endpoint, err := azure.ParseAzureStorageBlobConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
-	serviceURL := azblob.NewServiceURL(*endpoint, p)
-	containerURL := serviceURL.NewContainerURL(containerName)
-
-	_, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer)
-	assert.NoErrorf(t, err, 
"cannot create blob container - %s", err) - - domains := strings.Split(endpoint.Hostname(), ".") - accountName = domains[0] - return containerURL + _, err = blobClient.DeleteContainer(ctx, containerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) } func getTemplateData() (templateData, []Template) { @@ -201,29 +187,25 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing activation ---") - addFiles(t, containerURL, 4) + addFiles(ctx, t, blobClient, 4) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing scale out ---") - addFiles(t, containerURL, 10) + addFiles(ctx, t, blobClient, 10) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 1), "replica count should be 2 after 1 minute") } -func testScaleIn(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.ContainerURL) { +func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, blobClient *azblob.Client) { t.Log("--- testing scale in ---") for i := 0; i < 10; i++ { blobName := fmt.Sprintf("blob-%d", i) - blobURL := containerURL.NewBlockBlobURL(blobName) - - _, err := blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionInclude, - azblob.BlobAccessConditions{}) - + _, err := blobClient.DeleteBlob(ctx, containerName, blobName, nil) assert.NoErrorf(t, err, "cannot delete blob - %s", err) } @@ -231,23 +213,12 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset, containerURL azblob.Con "replica count should be 0 after 1 minute") } -func addFiles(t *testing.T, containerURL azblob.ContainerURL, count int) { +func addFiles(ctx context.Context, t *testing.T, blobClient *azblob.Client, count int) { data := "Hello World!" 
for i := 0; i < count; i++ { blobName := fmt.Sprintf("blob-%d", i) - blobURL := containerURL.NewBlockBlobURL(blobName) - - _, err := blobURL.Upload(context.Background(), strings.NewReader(data), - azblob.BlobHTTPHeaders{ContentType: "text/plain"}, azblob.Metadata{}, azblob.BlobAccessConditions{}, - azblob.DefaultAccessTier, nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) - + _, err := blobClient.UploadStream(ctx, containerName, blobName, strings.NewReader(data), nil) assert.NoErrorf(t, err, "cannot upload blob - %s", err) } } - -func cleanupContainer(t *testing.T, containerURL azblob.ContainerURL) { - t.Log("--- cleaning up ---") - _, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}) - assert.NoErrorf(t, err, "cannot delete storage container - %s", err) -} diff --git a/tests/scalers/azure/azure_event_hub_aad_wi/azure_event_hub_aad_wi_test.go b/tests/scalers/azure/azure_event_hub_aad_wi/azure_event_hub_aad_wi_test.go index f03e3bdb038..9207f7d6594 100644 --- a/tests/scalers/azure/azure_event_hub_aad_wi/azure_event_hub_aad_wi_test.go +++ b/tests/scalers/azure/azure_event_hub_aad_wi/azure_event_hub_aad_wi_test.go @@ -7,22 +7,18 @@ import ( "context" "encoding/base64" "fmt" - "net/url" "os" - "strings" "testing" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . "github.com/kedacore/keda/v2/tests/helper" + azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper" ) // Load environment variables from .env file @@ -34,18 +30,14 @@ const ( ) var ( - eventHubName = fmt.Sprintf("keda-eh-%d", GetRandomNumber()) - namespaceConnectionString = os.Getenv("TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING") - eventhubConnectionString = fmt.Sprintf("%s;EntityPath=%s", namespaceConnectionString, eventHubName) - storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") - checkpointContainerName = fmt.Sprintf("go-checkpoint-%d", GetRandomNumber()) - testNamespace = fmt.Sprintf("%s-ns", testName) - secretName = fmt.Sprintf("%s-secret", testName) - deploymentName = fmt.Sprintf("%s-deployment", testName) - triggerAuthName = fmt.Sprintf("%s-ta", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) - eventHubNamespaceName = "" - accountName = "" + storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") + checkpointContainerName = fmt.Sprintf("go-checkpoint-%d", GetRandomNumber()) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + accountName = "" ) type templateData struct { @@ -155,21 +147,26 @@ spec: func TestScaler(t *testing.T) { // setup + ctx := context.Background() t.Log("--- setting up ---") - require.NotEmpty(t, namespaceConnectionString, "TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING env variable is required for azure eventhub test") require.NotEmpty(t, storageConnectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test") + accountName = 
azurehelper.GetAccountFromStorageConnectionString(storageConnectionString)
 
-	adminClient, client := createEventHub(t)
-	container := createContainer(t)
+	eventHubHelper := azurehelper.NewEventHubHelper(t)
+	eventHubHelper.CreateEventHub(ctx, t)
+	blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil)
+	assert.NoErrorf(t, err, "cannot create the blob client - %s", err)
+	_, err = blobClient.CreateContainer(ctx, checkpointContainerName, nil)
+	assert.NoErrorf(t, err, "cannot create the container - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
-	data, templates := getTemplateData()
+	data, templates := getTemplateData(eventHubHelper)
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 
 	// We need to wait till consumer creates the checkpoint
-	addEvents(t, client, 1)
+	eventHubHelper.PublishEventHubdEvents(ctx, t, 1)
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
 		"replica count should be 1 after 1 minute")
 	time.Sleep(time.Duration(60) * time.Second)
@@ -179,69 +176,19 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testActivation(t, kc, client)
-	testScaleOut(t, kc, client)
+	testActivation(ctx, t, kc, eventHubHelper)
+	testScaleOut(ctx, t, kc, eventHubHelper)
 	testScaleIn(t, kc)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	deleteEventHub(t, adminClient)
-	deleteContainer(t, container)
+	eventHubHelper.DeleteEventHub(ctx, t)
+	_, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil)
+	assert.NoErrorf(t, err, "cannot delete the container - %s", err)
 }
 
-func createEventHub(t *testing.T) (*eventhub.HubManager, *eventhub.Hub) {
-	eventhubManager, err := eventhub.NewHubManagerFromConnectionString(namespaceConnectionString)
-	assert.NoErrorf(t, err, "cannot create eventhubManager client - %s", err)
-
-	eventHubNamespaceURL, err := url.Parse(eventhubManager.Host)
-	assert.NoErrorf(t, err, "cannot parse event hub namespace url - %s", err)
-	domains := strings.Split(eventHubNamespaceURL.Hostname(), ".")
-	eventHubNamespaceName = domains[0]
-
-	opts := []eventhub.HubManagementOption{
-		eventhub.HubWithPartitionCount(1),
-		eventhub.HubWithMessageRetentionInDays(1),
-	}
-	_, err = eventhubManager.Put(context.Background(), eventHubName, opts...)
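	// Editor's note (not part of the original patch): the legacy
	// eventhub.HubManager.Put call above provisioned the hub inline; the
	// replacement azurehelper.EventHubHelper (tests/scalers/azure/helper, not
	// shown in this series) is assumed to wrap the same lifecycle, roughly:
	//
	//	eventHubHelper := azurehelper.NewEventHubHelper(t)
	//	eventHubHelper.CreateEventHub(ctx, t)            // provision the hub
	//	eventHubHelper.PublishEventHubdEvents(ctx, t, n) // send n test events
	//	eventHubHelper.DeleteEventHub(ctx, t)            // tear the hub down
	//	// plus accessors: ConnectionString(), EventHub(), EventHubNamespace()
	//
	// All of these names are taken from their call sites in this patch.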
- assert.NoErrorf(t, err, "cannot create event hub - %s", err) - - eventhub, err := eventhub.NewHubFromConnectionString(eventhubConnectionString) - assert.NoErrorf(t, err, "cannot create eventhub client - %s", err) - return eventhubManager, eventhub -} - -func deleteEventHub(t *testing.T, adminClient *eventhub.HubManager) { - err := adminClient.Delete(context.Background(), eventHubName) - assert.NoErrorf(t, err, "cannot delete event hub - %s", err) -} - -func createContainer(t *testing.T) azblob.ContainerURL { - // Create Blob Container - credential, endpoint, err := azure.ParseAzureStorageBlobConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - storageConnectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(checkpointContainerName) - - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer) - assert.NoErrorf(t, err, "cannot create blob container - %s", err) - - domains := strings.Split(endpoint.Hostname(), ".") - accountName = domains[0] - return containerURL -} - -func deleteContainer(t *testing.T, containerURL azblob.ContainerURL) { - t.Log("--- cleaning up ---") - _, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}) - assert.NoErrorf(t, err, "cannot delete storage container - %s", err) -} - -func getTemplateData() (templateData, []Template) { - base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventhubConnectionString)) +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) return templateData{ @@ -255,8 +202,8 @@ func getTemplateData() (templateData, []Template) { TriggerAuthName: triggerAuthName, ConsumerGroup: eventhubConsumerGroup, AccountName: accountName, - EventHubName: eventHubName, - EventHubNamespaceName: eventHubNamespaceName, + EventHubName: eventHubHelper.EventHub(), + EventHubNamespaceName: eventHubHelper.EventHubNamespace(), }, []Template{ {Name: "secretTemplate", Config: secretTemplate}, {Name: "deploymentTemplate", Config: deploymentTemplate}, @@ -264,16 +211,16 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing activation ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing scale out ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") @@ -285,16 +232,3 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { 
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
-
-func addEvents(t *testing.T, client *eventhub.Hub, count int) {
-	for i := 0; i < count; i++ {
-		now := time.Now()
-		formatted := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d",
-			now.Year(), now.Month(), now.Day(),
-			now.Hour(), now.Minute(), now.Second())
-		msg := fmt.Sprintf("Message - %s", formatted)
-		err := client.Send(context.Background(), eventhub.NewEventFromString(msg))
-		assert.NoErrorf(t, err, "cannot enqueue event - %s", err)
-		t.Logf("event queued")
-	}
-}
diff --git a/tests/scalers/azure/azure_event_hub_blob_metadata/azure_event_hub_blob_metadata_test.go b/tests/scalers/azure/azure_event_hub_blob_metadata/azure_event_hub_blob_metadata_test.go
index 986c31bfd10..7472d78ee11 100644
--- a/tests/scalers/azure/azure_event_hub_blob_metadata/azure_event_hub_blob_metadata_test.go
+++ b/tests/scalers/azure/azure_event_hub_blob_metadata/azure_event_hub_blob_metadata_test.go
@@ -11,16 +11,14 @@ import (
 	"testing"
 	"time"
 
-	eventhub "github.com/Azure/azure-event-hubs-go/v3"
-	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
+	azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper"
 )
 
 // Load environment variables from .env file
@@ -32,16 +30,13 @@ const (
 )
 
 var (
-	eventHubName              = fmt.Sprintf("keda-eh-%d", GetRandomNumber())
-	namespaceConnectionString = os.Getenv("TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING")
-	eventhubConnectionString  = fmt.Sprintf("%s;EntityPath=%s", namespaceConnectionString, eventHubName)
-	storageConnectionString   = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING")
-	checkpointContainerName   = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber())
-	testNamespace             = fmt.Sprintf("%s-ns", testName)
-	secretName                = fmt.Sprintf("%s-secret", testName)
-	deploymentName            = fmt.Sprintf("%s-deployment", testName)
-	triggerAuthName           = fmt.Sprintf("%s-ta", testName)
-	scaledObjectName          = fmt.Sprintf("%s-so", testName)
+	storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING")
+	checkpointContainerName = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber())
+	testNamespace           = fmt.Sprintf("%s-ns", testName)
+	secretName              = fmt.Sprintf("%s-secret", testName)
+	deploymentName          = fmt.Sprintf("%s-deployment", testName)
+	triggerAuthName         = fmt.Sprintf("%s-ta", testName)
+	scaledObjectName        = fmt.Sprintf("%s-so", testName)
 )
 
 type templateData struct {
@@ -149,22 +144,25 @@ spec:
 )
 
 func TestScaler(t *testing.T) {
-	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
-	require.NotEmpty(t, namespaceConnectionString, "TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING env variable is required for azure eventhub test")
 	require.NotEmpty(t, storageConnectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test")
 
-	adminClient, client := createEventHub(t)
-	container := createContainer(t)
+	eventHubHelper := azurehelper.NewEventHubHelper(t)
+	eventHubHelper.CreateEventHub(ctx, t)
+	blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil)
+	assert.NoErrorf(t, err, "cannot create the blob client - %s", err)
+	_, err = 
blobClient.CreateContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) - data, templates := getTemplateData() + data, templates := getTemplateData(eventHubHelper) CreateKubernetesResources(t, kc, testNamespace, data, templates) // We need to wait till consumer creates the checkpoint - addEvents(t, client, 1) + eventHubHelper.PublishEventHubdEvents(ctx, t, 1) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") time.Sleep(time.Duration(60) * time.Second) @@ -174,61 +172,19 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testActivation(t, kc, client) - testScaleOut(t, kc, client) + testActivation(ctx, t, kc, eventHubHelper) + testScaleOut(ctx, t, kc, eventHubHelper) testScaleIn(t, kc) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - deleteEventHub(t, adminClient) - deleteContainer(t, container) + eventHubHelper.DeleteEventHub(ctx, t) + _, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) } -func createEventHub(t *testing.T) (*eventhub.HubManager, *eventhub.Hub) { - eventhubManager, err := eventhub.NewHubManagerFromConnectionString(namespaceConnectionString) - assert.NoErrorf(t, err, "cannot create eventhubManager client - %s", err) - opts := []eventhub.HubManagementOption{ - eventhub.HubWithPartitionCount(1), - eventhub.HubWithMessageRetentionInDays(1), - } - _, err = eventhubManager.Put(context.Background(), eventHubName, opts...) - assert.NoErrorf(t, err, "cannot create event hub - %s", err) - - eventhub, err := eventhub.NewHubFromConnectionString(eventhubConnectionString) - assert.NoErrorf(t, err, "cannot create eventhub client - %s", err) - return eventhubManager, eventhub -} - -func deleteEventHub(t *testing.T, adminClient *eventhub.HubManager) { - err := adminClient.Delete(context.Background(), eventHubName) - assert.NoErrorf(t, err, "cannot delete event hub - %s", err) -} - -func createContainer(t *testing.T) azblob.ContainerURL { - // Create Blob Container - credential, endpoint, err := azure.ParseAzureStorageBlobConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - storageConnectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(checkpointContainerName) - - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer) - assert.NoErrorf(t, err, "cannot create blob container - %s", err) - - return containerURL -} - -func deleteContainer(t *testing.T, containerURL azblob.ContainerURL) { - t.Log("--- cleaning up ---") - _, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}) - assert.NoErrorf(t, err, "cannot delete storage container - %s", err) -} - -func getTemplateData() (templateData, []Template) { - base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventhubConnectionString)) +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := 
base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) return templateData{ @@ -248,16 +204,16 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing activation ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing scale out ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") @@ -269,16 +225,3 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), "replica count should be 0 after 1 minute") } - -func addEvents(t *testing.T, client *eventhub.Hub, count int) { - for i := 0; i < count; i++ { - now := time.Now() - formatted := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d", - now.Year(), now.Month(), now.Day(), - now.Hour(), now.Minute(), now.Second()) - msg := fmt.Sprintf("Message - %s", formatted) - err := client.Send(context.Background(), eventhub.NewEventFromString(msg)) - assert.NoErrorf(t, err, "cannot enqueue event - %s", err) - t.Logf("event queued") - } -} diff --git a/tests/scalers/azure/azure_event_hub_dapr/azure_event_hub_dapr_test.go b/tests/scalers/azure/azure_event_hub_dapr/azure_event_hub_dapr_test.go index fc6a47b63dc..b786d3d91ef 100644 --- a/tests/scalers/azure/azure_event_hub_dapr/azure_event_hub_dapr_test.go +++ b/tests/scalers/azure/azure_event_hub_dapr/azure_event_hub_dapr_test.go @@ -12,16 +12,14 @@ import ( "testing" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . 
"github.com/kedacore/keda/v2/tests/helper" + azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper" ) // Load environment variables from .env file @@ -33,18 +31,15 @@ const ( ) var ( - eventHubName = fmt.Sprintf("keda-eh-%d", GetRandomNumber()) - namespaceConnectionString = os.Getenv("TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING") - eventhubConnectionString = fmt.Sprintf("%s;EntityPath=%s", namespaceConnectionString, eventHubName) - storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") - storageAccountName = getValueFromConnectionString(storageConnectionString, "AccountName") - storageAccountKey = getValueFromConnectionString(storageConnectionString, "AccountKey") - checkpointContainerName = fmt.Sprintf("dapr-checkpoint-%d", GetRandomNumber()) - testNamespace = fmt.Sprintf("%s-ns", testName) - secretName = fmt.Sprintf("%s-secret", testName) - deploymentName = fmt.Sprintf("%s-deployment", testName) - triggerAuthName = fmt.Sprintf("%s-ta", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) + storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") + storageAccountName = getValueFromConnectionString(storageConnectionString, "AccountName") + storageAccountKey = getValueFromConnectionString(storageConnectionString, "AccountKey") + checkpointContainerName = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber()) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) ) type templateData struct { @@ -176,22 +171,25 @@ spec: ) func TestScaler(t *testing.T) { - // setup + ctx := context.Background() t.Log("--- setting up ---") - require.NotEmpty(t, namespaceConnectionString, "TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING env variable is required for azure eventhub test") require.NotEmpty(t, storageConnectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test") - adminClient, client := createEventHub(t) - container := createContainer(t) + eventHubHelper := azurehelper.NewEventHubHelper(t) + eventHubHelper.CreateEventHub(ctx, t) + blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = blobClient.CreateContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) - data, templates := getTemplateData() + data, templates := getTemplateData(eventHubHelper) CreateKubernetesResources(t, kc, testNamespace, data, templates) // We need to wait till consumer creates the checkpoint - addEvents(t, client, 1) + eventHubHelper.PublishEventHubdEvents(ctx, t, 1) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") time.Sleep(time.Duration(60) * time.Second) @@ -201,67 +199,25 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testActivation(t, kc, client) - testScaleOut(t, kc, client) + testActivation(ctx, t, kc, eventHubHelper) + testScaleOut(ctx, t, kc, eventHubHelper) testScaleIn(t, kc) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - deleteEventHub(t, adminClient) - deleteContainer(t, container) + eventHubHelper.DeleteEventHub(ctx, 
t) + _, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) } -func createEventHub(t *testing.T) (*eventhub.HubManager, *eventhub.Hub) { - eventhubManager, err := eventhub.NewHubManagerFromConnectionString(namespaceConnectionString) - assert.NoErrorf(t, err, "cannot create eventhubManager client - %s", err) - opts := []eventhub.HubManagementOption{ - eventhub.HubWithPartitionCount(1), - eventhub.HubWithMessageRetentionInDays(1), - } - _, err = eventhubManager.Put(context.Background(), eventHubName, opts...) - assert.NoErrorf(t, err, "cannot create event hub - %s", err) - - eventhub, err := eventhub.NewHubFromConnectionString(eventhubConnectionString) - assert.NoErrorf(t, err, "cannot create eventhub client - %s", err) - return eventhubManager, eventhub -} - -func deleteEventHub(t *testing.T, adminClient *eventhub.HubManager) { - err := adminClient.Delete(context.Background(), eventHubName) - assert.NoErrorf(t, err, "cannot delete event hub - %s", err) -} - -func createContainer(t *testing.T) azblob.ContainerURL { - // Create Blob Container - credential, endpoint, err := azure.ParseAzureStorageBlobConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - storageConnectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(checkpointContainerName) - - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer) - assert.NoErrorf(t, err, "cannot create blob container - %s", err) - - return containerURL -} - -func deleteContainer(t *testing.T, containerURL azblob.ContainerURL) { - t.Log("--- cleaning up ---") - _, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}) - assert.NoErrorf(t, err, "cannot delete storage container - %s", err) -} - -func getTemplateData() (templateData, []Template) { - base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventhubConnectionString)) +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) return templateData{ TestNamespace: testNamespace, SecretName: secretName, - EventHubConnection: eventhubConnectionString, + EventHubConnection: eventHubHelper.ConnectionString(), StorageConnection: storageConnectionString, Base64EventHubConnection: base64EventhubConnection, Base64StorageConnection: base64StorageConnection, @@ -279,16 +235,16 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing activation ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { 
t.Log("--- testing scale out ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") @@ -301,19 +257,6 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { "replica count should be 0 after 1 minute") } -func addEvents(t *testing.T, client *eventhub.Hub, count int) { - for i := 0; i < count; i++ { - now := time.Now() - formatted := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d", - now.Year(), now.Month(), now.Day(), - now.Hour(), now.Minute(), now.Second()) - msg := fmt.Sprintf("Message - %s", formatted) - err := client.Send(context.Background(), eventhub.NewEventFromString(msg)) - assert.NoErrorf(t, err, "cannot enqueue event - %s", err) - t.Logf("event queued") - } -} - func getValueFromConnectionString(storageAccountConnectionString string, keyName string) string { items := strings.Split(storageAccountConnectionString, ";") for _, item := range items { diff --git a/tests/scalers/azure/azure_event_hub_go_sdk/azure_event_hub_go_sdk_test.go b/tests/scalers/azure/azure_event_hub_go_sdk/azure_event_hub_go_sdk_test.go index fc812f8743e..47ad024aef1 100644 --- a/tests/scalers/azure/azure_event_hub_go_sdk/azure_event_hub_go_sdk_test.go +++ b/tests/scalers/azure/azure_event_hub_go_sdk/azure_event_hub_go_sdk_test.go @@ -11,16 +11,14 @@ import ( "testing" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . 
"github.com/kedacore/keda/v2/tests/helper" + azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper" ) // Load environment variables from .env file @@ -32,16 +30,13 @@ const ( ) var ( - eventHubName = fmt.Sprintf("keda-eh-%d", GetRandomNumber()) - namespaceConnectionString = os.Getenv("TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING") - eventhubConnectionString = fmt.Sprintf("%s;EntityPath=%s", namespaceConnectionString, eventHubName) - storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") - checkpointContainerName = fmt.Sprintf("go-checkpoint-%d", GetRandomNumber()) - testNamespace = fmt.Sprintf("%s-ns", testName) - secretName = fmt.Sprintf("%s-secret", testName) - deploymentName = fmt.Sprintf("%s-deployment", testName) - triggerAuthName = fmt.Sprintf("%s-ta", testName) - scaledObjectName = fmt.Sprintf("%s-so", testName) + storageConnectionString = os.Getenv("TF_AZURE_STORAGE_CONNECTION_STRING") + checkpointContainerName = fmt.Sprintf("blob-checkpoint-%d", GetRandomNumber()) + testNamespace = fmt.Sprintf("%s-ns", testName) + secretName = fmt.Sprintf("%s-secret", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) ) type templateData struct { @@ -149,22 +144,25 @@ spec: ) func TestScaler(t *testing.T) { - // setup + ctx := context.Background() t.Log("--- setting up ---") - require.NotEmpty(t, namespaceConnectionString, "TF_AZURE_EVENTHBUS_MANAGEMENT_CONNECTION_STRING env variable is required for azure eventhub test") require.NotEmpty(t, storageConnectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure eventhub test") - adminClient, client := createEventHub(t) - container := createContainer(t) + eventHubHelper := azurehelper.NewEventHubHelper(t) + eventHubHelper.CreateEventHub(ctx, t) + blobClient, err := azblob.NewClientFromConnectionString(storageConnectionString, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = blobClient.CreateContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot create the container - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) - data, templates := getTemplateData() + data, templates := getTemplateData(eventHubHelper) CreateKubernetesResources(t, kc, testNamespace, data, templates) // We need to wait till consumer creates the checkpoint - addEvents(t, client, 1) + eventHubHelper.PublishEventHubdEvents(ctx, t, 1) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") time.Sleep(time.Duration(60) * time.Second) @@ -174,61 +172,19 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testActivation(t, kc, client) - testScaleOut(t, kc, client) + testActivation(ctx, t, kc, eventHubHelper) + testScaleOut(ctx, t, kc, eventHubHelper) testScaleIn(t, kc) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - deleteEventHub(t, adminClient) - deleteContainer(t, container) + eventHubHelper.DeleteEventHub(ctx, t) + _, err = blobClient.DeleteContainer(ctx, checkpointContainerName, nil) + assert.NoErrorf(t, err, "cannot delete the container - %s", err) } -func createEventHub(t *testing.T) (*eventhub.HubManager, *eventhub.Hub) { - eventhubManager, err := eventhub.NewHubManagerFromConnectionString(namespaceConnectionString) - assert.NoErrorf(t, err, "cannot create 
eventhubManager client - %s", err) - opts := []eventhub.HubManagementOption{ - eventhub.HubWithPartitionCount(1), - eventhub.HubWithMessageRetentionInDays(1), - } - _, err = eventhubManager.Put(context.Background(), eventHubName, opts...) - assert.NoErrorf(t, err, "cannot create event hub - %s", err) - - eventhub, err := eventhub.NewHubFromConnectionString(eventhubConnectionString) - assert.NoErrorf(t, err, "cannot create eventhub client - %s", err) - return eventhubManager, eventhub -} - -func deleteEventHub(t *testing.T, adminClient *eventhub.HubManager) { - err := adminClient.Delete(context.Background(), eventHubName) - assert.NoErrorf(t, err, "cannot delete event hub - %s", err) -} - -func createContainer(t *testing.T) azblob.ContainerURL { - // Create Blob Container - credential, endpoint, err := azure.ParseAzureStorageBlobConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - storageConnectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - serviceURL := azblob.NewServiceURL(*endpoint, p) - containerURL := serviceURL.NewContainerURL(checkpointContainerName) - - _, err = containerURL.Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessContainer) - assert.NoErrorf(t, err, "cannot create blob container - %s", err) - - return containerURL -} - -func deleteContainer(t *testing.T, containerURL azblob.ContainerURL) { - t.Log("--- cleaning up ---") - _, err := containerURL.Delete(context.Background(), azblob.ContainerAccessConditions{}) - assert.NoErrorf(t, err, "cannot delete storage container - %s", err) -} - -func getTemplateData() (templateData, []Template) { - base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventhubConnectionString)) +func getTemplateData(eventHubHelper azurehelper.EventHubHelper) (templateData, []Template) { + base64EventhubConnection := base64.StdEncoding.EncodeToString([]byte(eventHubHelper.ConnectionString())) base64StorageConnection := base64.StdEncoding.EncodeToString([]byte(storageConnectionString)) return templateData{ @@ -248,16 +204,16 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing activation ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, client *eventhub.Hub) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, eventHubHelper azurehelper.EventHubHelper) { t.Log("--- testing scale out ---") - addEvents(t, client, 8) + eventHubHelper.PublishEventHubdEvents(ctx, t, 8) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1), "replica count should be 1 after 1 minute") @@ -269,16 +225,3 @@ func testScaleIn(t *testing.T, kc *kubernetes.Clientset) { assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1), "replica count should be 0 after 1 minute") } - -func addEvents(t *testing.T, client *eventhub.Hub, count int) { - for i := 0; i < count; i++ { - now := time.Now() - formatted := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d", - 
now.Year(), now.Month(), now.Day(), - now.Hour(), now.Minute(), now.Second()) - msg := fmt.Sprintf("Message - %s", formatted) - err := client.Send(context.Background(), eventhub.NewEventFromString(msg)) - assert.NoErrorf(t, err, "cannot enqueue event - %s", err) - t.Logf("event queued") - } -} diff --git a/tests/scalers/azure/azure_queue/azure_queue_test.go b/tests/scalers/azure/azure_queue/azure_queue_test.go index 87ace5dfd78..a77d6393387 100644 --- a/tests/scalers/azure/azure_queue/azure_queue_test.go +++ b/tests/scalers/azure/azure_queue/azure_queue_test.go @@ -9,16 +9,13 @@ import ( "fmt" "os" "testing" - "time" - "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . "github.com/kedacore/keda/v2/tests/helper" ) @@ -113,10 +110,14 @@ spec: func TestScaler(t *testing.T) { // setup + ctx := context.Background() t.Log("--- setting up ---") require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure queue test") - queueURL, messageURL := createQueue(t) + queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = queueClient.Create(ctx, nil) + assert.NoErrorf(t, err, "cannot create the queue - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) @@ -128,32 +129,14 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testActivation(t, kc, messageURL) - testScaleOut(t, kc, messageURL) - testScaleIn(t, kc, messageURL) + testActivation(ctx, t, kc, queueClient) + testScaleOut(ctx, t, kc, queueClient) + testScaleIn(ctx, t, kc, queueClient) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - cleanupQueue(t, queueURL) -} - -func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) { - // Create Queue - credential, endpoint, err := azure.ParseAzureStorageQueueConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - connectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{}) - serviceURL := azqueue.NewServiceURL(*endpoint, p) - queueURL := serviceURL.NewQueueURL(queueName) - - _, err = queueURL.Create(context.Background(), azqueue.Metadata{}) - assert.NoErrorf(t, err, "cannot create storage queue - %s", err) - - messageURL := queueURL.NewMessagesURL() - t.Logf("Queue %s created", queueName) - return queueURL, messageURL + _, err = queueClient.Delete(ctx, nil) + assert.NoErrorf(t, err, "cannot create the queue - %s", err) } func getTemplateData() (templateData, []Template) { @@ -173,42 +156,35 @@ func getTemplateData() (templateData, []Template) { } } -func testActivation(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) { +func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) { t.Log("--- testing activation ---") - addMessages(t, messageURL, 3) + addMessages(ctx, t, client, 3) AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60) } -func testScaleOut(t *testing.T, kc 
diff --git a/tests/scalers/azure/azure_queue/azure_queue_test.go b/tests/scalers/azure/azure_queue/azure_queue_test.go
index 87ace5dfd78..a77d6393387 100644
--- a/tests/scalers/azure/azure_queue/azure_queue_test.go
+++ b/tests/scalers/azure/azure_queue/azure_queue_test.go
@@ -9,16 +9,13 @@ import (
 	"fmt"
 	"os"
 	"testing"
-	"time"
 
-	"github.com/Azure/azure-storage-queue-go/azqueue"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
 )
 
@@ -113,10 +110,14 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure queue test")
 
-	queueURL, messageURL := createQueue(t)
+	queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil)
+	assert.NoErrorf(t, err, "cannot create the queue client - %s", err)
+	_, err = queueClient.Create(ctx, nil)
+	assert.NoErrorf(t, err, "cannot create the queue - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -128,32 +129,14 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testActivation(t, kc, messageURL)
-	testScaleOut(t, kc, messageURL)
-	testScaleIn(t, kc, messageURL)
+	testActivation(ctx, t, kc, queueClient)
+	testScaleOut(ctx, t, kc, queueClient)
+	testScaleIn(ctx, t, kc, queueClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupQueue(t, queueURL)
-}
-
-func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) {
-	// Create Queue
-	credential, endpoint, err := azure.ParseAzureStorageQueueConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
-	serviceURL := azqueue.NewServiceURL(*endpoint, p)
-	queueURL := serviceURL.NewQueueURL(queueName)
-
-	_, err = queueURL.Create(context.Background(), azqueue.Metadata{})
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-
-	messageURL := queueURL.NewMessagesURL()
-	t.Logf("Queue %s created", queueName)
-	return queueURL, messageURL
+	_, err = queueClient.Delete(ctx, nil)
+	assert.NoErrorf(t, err, "cannot delete the queue - %s", err)
 }
 
 func getTemplateData() (templateData, []Template) {
@@ -173,42 +156,35 @@ func getTemplateData() (templateData, []Template) {
 	}
 }
 
-func testActivation(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing activation ---")
-	addMessages(t, messageURL, 3)
+	addMessages(ctx, t, client, 3)
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
 }
 
-func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale out ---")
-	addMessages(t, messageURL, 5)
+	addMessages(ctx, t, client, 5)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
 		"replica count should be 1 after 1 minute")
 }
 
-func testScaleIn(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale in ---")
-	_, err := messageURL.Clear(context.Background())
+	_, err := client.ClearMessages(ctx, nil)
 	assert.NoErrorf(t, err, "cannot clear queue - %s", err)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
 
-func addMessages(t *testing.T, messageURL azqueue.MessagesURL, count int) {
+func addMessages(ctx context.Context, t *testing.T, client *azqueue.QueueClient, count int) {
 	for i := 0; i < count; i++ {
 		msg := fmt.Sprintf("Message - %d", i)
-		_, err := messageURL.Enqueue(context.Background(), msg, 0*time.Second, time.Hour)
+		_, err := client.EnqueueMessage(ctx, msg, nil)
 		assert.NoErrorf(t, err, "cannot enqueue message - %s", err)
 		t.Logf("Message queued")
 	}
 }
-
-func cleanupQueue(t *testing.T, queueURL azqueue.QueueURL) {
-	t.Log("--- cleaning up ---")
-	_, err := queueURL.Delete(context.Background())
-	assert.NoErrorf(t, err, "cannot delete storage queue - %s", err)
-	t.Logf("Queue %s deleted", queueName)
-}
diff --git a/tests/scalers/azure/azure_queue_aad_wi/azure_queue_aad_wi_test.go b/tests/scalers/azure/azure_queue_aad_wi/azure_queue_aad_wi_test.go
index 88fc793bdc8..81d08405978 100644
--- a/tests/scalers/azure/azure_queue_aad_wi/azure_queue_aad_wi_test.go
+++ b/tests/scalers/azure/azure_queue_aad_wi/azure_queue_aad_wi_test.go
@@ -8,19 +8,16 @@ import (
 	"encoding/base64"
 	"fmt"
 	"os"
-	"strings"
 	"testing"
-	"time"
 
-	"github.com/Azure/azure-storage-queue-go/azqueue"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
+	azurehelper "github.com/kedacore/keda/v2/tests/scalers/azure/helper"
 )
 
 // Load environment variables from .env file
@@ -129,10 +126,15 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for azure queue test")
+	accountName = azurehelper.GetAccountFromStorageConnectionString(connectionString)
 
-	queueURL, messageURL := createQueue(t)
+	queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil)
+	assert.NoErrorf(t, err, "cannot create the queue client - %s", err)
+	_, err = queueClient.Create(ctx, nil)
+	assert.NoErrorf(t, err, "cannot create the queue - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -144,36 +146,14 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testActivation(t, kc, messageURL)
-	testScaleOut(t, kc, messageURL)
-	testScaleIn(t, kc, messageURL)
+	testActivation(ctx, t, kc, queueClient)
+	testScaleOut(ctx, t, kc, queueClient)
+	testScaleIn(ctx, t, kc, queueClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupQueue(t, queueURL)
-}
-
-func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) {
-	// Create Queue
-	credential, endpoint, err := azure.ParseAzureStorageQueueConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
-	serviceURL := azqueue.NewServiceURL(*endpoint, p)
-	queueURL := serviceURL.NewQueueURL(queueName)
-
-	_, err = queueURL.Create(context.Background(), azqueue.Metadata{})
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-
-	messageURL := queueURL.NewMessagesURL()
-	t.Logf("Queue %s created", queueName)
-
-	domains := strings.Split(endpoint.Hostname(), ".")
-	accountName = domains[0]
-
-	return queueURL, messageURL
+	_, err = queueClient.Delete(ctx, nil)
+	assert.NoErrorf(t, err, "cannot delete the queue - %s", err)
 }
 
 func getTemplateData() (templateData, []Template) {
@@ -195,43 +175,35 @@ func getTemplateData() (templateData, []Template) {
 		{Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
 	}
 }
-
-func testActivation(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testActivation(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing activation ---")
-	addMessages(t, messageURL, 3)
+	addMessages(ctx, t, client, 3)
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
 }
 
-func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale out ---")
-	addMessages(t, messageURL, 5)
+	addMessages(ctx, t, client, 5)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
 		"replica count should be 1 after 1 minute")
 }
 
-func testScaleIn(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale in ---")
-	_, err := messageURL.Clear(context.Background())
+	_, err := client.ClearMessages(ctx, nil)
 	assert.NoErrorf(t, err, "cannot clear queue - %s", err)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
 
-func addMessages(t *testing.T, messageURL azqueue.MessagesURL, count int) {
+func addMessages(ctx context.Context, t *testing.T, client *azqueue.QueueClient, count int) {
 	for i := 0; i < count; i++ {
 		msg := fmt.Sprintf("Message - %d", i)
-		_, err := messageURL.Enqueue(context.Background(), msg, 0*time.Second, time.Hour)
+		_, err := client.EnqueueMessage(ctx, msg, nil)
 		assert.NoErrorf(t, err, "cannot enqueue message - %s", err)
 		t.Logf("Message queued")
 	}
 }
-
-func cleanupQueue(t *testing.T, queueURL azqueue.QueueURL) {
-	t.Log("--- cleaning up ---")
-	_, err := queueURL.Delete(context.Background())
-	assert.NoErrorf(t, err, "cannot delete storage queue - %s", err)
-	t.Logf("Queue %s deleted", queueName)
-}
- %s", err) + + hubFactory, err := armeventhub.NewClientFactory(e.subscription, cred, nil) + assert.NoErrorf(t, err, "cannot create azure arm factory - %s", err) + + hubClient := hubFactory.NewEventHubsClient() + _, err = hubClient.CreateOrUpdate(ctx, e.resourceGroup, e.eventHubNamespace, e.eventHubName, armeventhub.Eventhub{ + Properties: &armeventhub.Properties{ + MessageRetentionInDays: to.Ptr[int64](1), + PartitionCount: to.Ptr[int64](1), + Status: to.Ptr(armeventhub.EntityStatusActive), + CaptureDescription: nil, + }, + }, &armeventhub.EventHubsClientCreateOrUpdateOptions{}) + assert.NoErrorf(t, err, "cannot create azure event hub - %s", err) + + _, err = hubClient.CreateOrUpdateAuthorizationRule(ctx, e.resourceGroup, e.eventHubNamespace, e.eventHubName, e.policyName, armeventhub.AuthorizationRule{ + Properties: &armeventhub.AuthorizationRuleProperties{ + Rights: []*armeventhub.AccessRights{ + to.Ptr(armeventhub.AccessRightsListen), + to.Ptr(armeventhub.AccessRightsManage), + to.Ptr(armeventhub.AccessRightsSend), + }, + }, + }, nil) + assert.NoErrorf(t, err, "cannot create azure event hub - %s", err) + + keys, err := hubClient.ListKeys(ctx, e.resourceGroup, e.eventHubNamespace, e.eventHubName, e.policyName, nil) + assert.NoErrorf(t, err, "cannot get azure event hub keys- %s", err) + e.connectionString = *keys.PrimaryConnectionString + + producer, err := azeventhubs.NewProducerClientFromConnectionString(e.connectionString, "", nil) + e.producer = producer + assert.NoErrorf(t, err, "cannot create event hub producer - %s", err) +} + +func (e *EventHubHelper) DeleteEventHub(ctx context.Context, t *testing.T) { + cred, err := azidentity.NewClientSecretCredential(e.tenant, e.clientID, e.clientSecret, nil) + assert.NoErrorf(t, err, "cannot create azure credentials - %s", err) + hubFactory, err := armeventhub.NewClientFactory(e.subscription, cred, nil) + assert.NoErrorf(t, err, "cannot create azure arm factory - %s", err) + hubClient := hubFactory.NewEventHubsClient() + _, err = hubClient.Delete(ctx, e.resourceGroup, e.eventHubNamespace, e.eventHubName, nil) + assert.NoErrorf(t, err, "cannot delete event hub - %s", err) +} + +func (e *EventHubHelper) PublishEventHubdEvents(ctx context.Context, t *testing.T, count int) { + batch, err := e.producer.NewEventDataBatch(ctx, nil) + assert.NoErrorf(t, err, "cannot create the batch - %s", err) + for i := 0; i < count; i++ { + now := time.Now() + formatted := fmt.Sprintf("%d-%02d-%02dT%02d:%02d:%02d", + now.Year(), now.Month(), now.Day(), + now.Hour(), now.Minute(), now.Second()) + msg := fmt.Sprintf("Message - %s", formatted) + err = batch.AddEventData(&azeventhubs.EventData{ + Body: []byte(msg), + }, nil) + assert.NoErrorf(t, err, "cannot batch the event - %s", err) + } + err = e.producer.SendEventDataBatch(ctx, batch, nil) + assert.NoErrorf(t, err, "cannot send the batch - %s", err) +} + +func (e *EventHubHelper) EventHubNamespace() string { + return e.eventHubNamespace +} + +func (e *EventHubHelper) EventHub() string { + return e.eventHubName +} + +func (e *EventHubHelper) ConnectionString() string { + return e.connectionString +} diff --git a/tests/scalers/azure/helper/StorageHelper.go b/tests/scalers/azure/helper/StorageHelper.go new file mode 100644 index 00000000000..99cde661e90 --- /dev/null +++ b/tests/scalers/azure/helper/StorageHelper.go @@ -0,0 +1,26 @@ +//go:build e2e +// +build e2e + +package helper + +import ( + "strings" +) + +func GetAccountFromStorageConnectionString(connection string) string { + parts := strings.Split(connection, 
";") + + getValue := func(pair string) string { + parts := strings.SplitN(pair, "=", 2) + if len(parts) == 2 { + return parts[1] + } + return "" + } + for _, v := range parts { + if strings.HasPrefix(v, "AccountName") { + return getValue(v) + } + } + return "" +} diff --git a/tests/secret-providers/azure_keyvault/azure_keyvault_test.go b/tests/secret-providers/azure_keyvault/azure_keyvault_test.go index 37fc4e007cb..13cb081e4b2 100644 --- a/tests/secret-providers/azure_keyvault/azure_keyvault_test.go +++ b/tests/secret-providers/azure_keyvault/azure_keyvault_test.go @@ -9,16 +9,13 @@ import ( "fmt" "os" "testing" - "time" - "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue" "github.com/joho/godotenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" - "github.com/kedacore/keda/v2/pkg/scalers/azure" . "github.com/kedacore/keda/v2/tests/helper" ) @@ -146,6 +143,7 @@ spec: func TestScaler(t *testing.T) { // setup + ctx := context.Background() t.Log("--- setting up ---") require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for key vault tests") require.NotEmpty(t, keyvaultURI, "TF_AZURE_KEYVAULT_URI env variable is required for key vault tests") @@ -153,7 +151,10 @@ func TestScaler(t *testing.T) { require.NotEmpty(t, azureADSecret, "AZURE_SP_KEY env variable is required for key vault tests") require.NotEmpty(t, azureADTenantID, "TF_AZURE_SP_TENANT env variable is required for key vault tests") - queueURL, messageURL := createQueue(t) + queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil) + assert.NoErrorf(t, err, "cannot create the queue client - %s", err) + _, err = queueClient.Create(ctx, nil) + assert.NoErrorf(t, err, "cannot create the queue - %s", err) // Create kubernetes resources kc := GetKubernetesClient(t) @@ -165,31 +166,13 @@ func TestScaler(t *testing.T) { "replica count should be 0 after 1 minute") // test scaling - testScaleOut(t, kc, messageURL) - testScaleIn(t, kc, messageURL) + testScaleOut(ctx, t, kc, queueClient) + testScaleIn(ctx, t, kc, queueClient) // cleanup DeleteKubernetesResources(t, testNamespace, data, templates) - cleanupQueue(t, queueURL) -} - -func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) { - // Create Queue - credential, endpoint, err := azure.ParseAzureStorageQueueConnection( - context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, - connectionString, "", "") - assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err) - - p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{}) - serviceURL := azqueue.NewServiceURL(*endpoint, p) - queueURL := serviceURL.NewQueueURL(queueName) - - _, err = queueURL.Create(context.Background(), azqueue.Metadata{}) - assert.NoErrorf(t, err, "cannot create storage queue - %s", err) - - messageURL := queueURL.NewMessagesURL() - - return queueURL, messageURL + _, err = queueClient.Delete(ctx, nil) + assert.NoErrorf(t, err, "cannot create the queue - %s", err) } func getTemplateData() (templateData, []Template) { @@ -216,29 +199,24 @@ func getTemplateData() (templateData, []Template) { } } -func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) { +func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client 
diff --git a/tests/secret-providers/azure_keyvault/azure_keyvault_test.go b/tests/secret-providers/azure_keyvault/azure_keyvault_test.go
index 37fc4e007cb..13cb081e4b2 100644
--- a/tests/secret-providers/azure_keyvault/azure_keyvault_test.go
+++ b/tests/secret-providers/azure_keyvault/azure_keyvault_test.go
@@ -9,16 +9,13 @@ import (
 	"fmt"
 	"os"
 	"testing"
-	"time"
 
-	"github.com/Azure/azure-storage-queue-go/azqueue"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
 )
 
@@ -146,6 +143,7 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for key vault tests")
 	require.NotEmpty(t, keyvaultURI, "TF_AZURE_KEYVAULT_URI env variable is required for key vault tests")
@@ -153,7 +151,10 @@ func TestScaler(t *testing.T) {
 	require.NotEmpty(t, azureADSecret, "AZURE_SP_KEY env variable is required for key vault tests")
 	require.NotEmpty(t, azureADTenantID, "TF_AZURE_SP_TENANT env variable is required for key vault tests")
 
-	queueURL, messageURL := createQueue(t)
+	queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil)
+	assert.NoErrorf(t, err, "cannot create the queue client - %s", err)
+	_, err = queueClient.Create(ctx, nil)
+	assert.NoErrorf(t, err, "cannot create the queue - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -165,31 +166,13 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testScaleOut(t, kc, messageURL)
-	testScaleIn(t, kc, messageURL)
+	testScaleOut(ctx, t, kc, queueClient)
+	testScaleIn(ctx, t, kc, queueClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupQueue(t, queueURL)
-}
-
-func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) {
-	// Create Queue
-	credential, endpoint, err := azure.ParseAzureStorageQueueConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
-	serviceURL := azqueue.NewServiceURL(*endpoint, p)
-	queueURL := serviceURL.NewQueueURL(queueName)
-
-	_, err = queueURL.Create(context.Background(), azqueue.Metadata{})
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-
-	messageURL := queueURL.NewMessagesURL()
-
-	return queueURL, messageURL
+	_, err = queueClient.Delete(ctx, nil)
+	assert.NoErrorf(t, err, "cannot delete the queue - %s", err)
 }
 
 func getTemplateData() (templateData, []Template) {
@@ -216,29 +199,24 @@ func getTemplateData() (templateData, []Template) {
 	}
 }
 
-func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale out ---")
 	for i := 0; i < 5; i++ {
 		msg := fmt.Sprintf("Message - %d", i)
-		_, err := messageURL.Enqueue(context.Background(), msg, 0*time.Second, time.Hour)
+		_, err := client.EnqueueMessage(ctx, msg, nil)
 		assert.NoErrorf(t, err, "cannot enqueue message - %s", err)
+		t.Logf("Message queued")
 	}
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
-		"replica count should be 0 after 1 minute")
+		"replica count should be 1 after 1 minute")
 }
 
-func testScaleIn(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale in ---")
-	_, err := messageURL.Clear(context.Background())
+	_, err := client.ClearMessages(ctx, nil)
 	assert.NoErrorf(t, err, "cannot clear queue - %s", err)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
-
-func cleanupQueue(t *testing.T, queueURL azqueue.QueueURL) {
-	t.Log("--- cleaning up ---")
-	_, err := queueURL.Delete(context.Background())
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-}
diff --git a/tests/secret-providers/azure_keyvault_workload_identity/azure_keyvault_workload_identity_test.go b/tests/secret-providers/azure_keyvault_workload_identity/azure_keyvault_workload_identity_test.go
index d07a044d341..2070dc88e39 100644
--- a/tests/secret-providers/azure_keyvault_workload_identity/azure_keyvault_workload_identity_test.go
+++ b/tests/secret-providers/azure_keyvault_workload_identity/azure_keyvault_workload_identity_test.go
@@ -9,16 +9,13 @@ import (
 	"fmt"
 	"os"
 	"testing"
-	"time"
 
-	"github.com/Azure/azure-storage-queue-go/azqueue"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
 )
 
@@ -133,11 +130,15 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for key vault tests")
 	require.NotEmpty(t, keyvaultURI, "TF_AZURE_KEYVAULT_URI env variable is required for key vault tests")
 
-	queueURL, messageURL := createQueue(t)
+	queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil)
+	assert.NoErrorf(t, err, "cannot create the queue client - %s", err)
+	_, err = queueClient.Create(ctx, nil)
+	assert.NoErrorf(t, err, "cannot create the queue - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -149,31 +150,13 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testScaleOut(t, kc, messageURL)
-	testScaleIn(t, kc, messageURL)
+	testScaleOut(ctx, t, kc, queueClient)
+	testScaleIn(ctx, t, kc, queueClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupQueue(t, queueURL)
-}
-
-func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) {
-	// Create Queue
-	credential, endpoint, err := azure.ParseAzureStorageQueueConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
-	serviceURL := azqueue.NewServiceURL(*endpoint, p)
-	queueURL := serviceURL.NewQueueURL(queueName)
-
-	_, err = queueURL.Create(context.Background(), azqueue.Metadata{})
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-
-	messageURL := queueURL.NewMessagesURL()
-
-	return queueURL, messageURL
+	_, err = queueClient.Delete(ctx, nil)
+	assert.NoErrorf(t, err, "cannot delete the queue - %s", err)
 }
 
 func getTemplateData() (templateData, []Template) {
@@ -196,29 +179,24 @@ func getTemplateData() (templateData, []Template) {
 	}
 }
 
-func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale out ---")
 	for i := 0; i < 5; i++ {
 		msg := fmt.Sprintf("Message - %d", i)
-		_, err := messageURL.Enqueue(context.Background(), msg, 0*time.Second, time.Hour)
+		_, err := client.EnqueueMessage(ctx, msg, nil)
 		assert.NoErrorf(t, err, "cannot enqueue message - %s", err)
+		t.Logf("Message queued")
 	}
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
-		"replica count should be 0 after 1 minute")
+		"replica count should be 1 after 1 minute")
 }
 
-func testScaleIn(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale in ---")
-	_, err := messageURL.Clear(context.Background())
+	_, err := client.ClearMessages(ctx, nil)
 	assert.NoErrorf(t, err, "cannot clear queue - %s", err)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
-
-func cleanupQueue(t *testing.T, queueURL azqueue.QueueURL) {
-	t.Log("--- cleaning up ---")
-	_, err := queueURL.Delete(context.Background())
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-}
diff --git a/tests/secret-providers/trigger_auth_secret/trigger_auth_secret_test.go b/tests/secret-providers/trigger_auth_secret/trigger_auth_secret_test.go
index 17a493b75e6..4b9c101f517 100644
--- a/tests/secret-providers/trigger_auth_secret/trigger_auth_secret_test.go
+++ b/tests/secret-providers/trigger_auth_secret/trigger_auth_secret_test.go
@@ -9,16 +9,13 @@ import (
 	"fmt"
 	"os"
 	"testing"
-	"time"
 
-	"github.com/Azure/azure-storage-queue-go/azqueue"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
-	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
-	"github.com/kedacore/keda/v2/pkg/scalers/azure"
 	. "github.com/kedacore/keda/v2/tests/helper"
 )
 
@@ -128,10 +125,14 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
+	ctx := context.Background()
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, connectionString, "TF_AZURE_STORAGE_CONNECTION_STRING env variable is required for trigger auth tests")
 
-	queueURL, messageURL := createQueue(t)
+	queueClient, err := azqueue.NewQueueClientFromConnectionString(connectionString, queueName, nil)
+	assert.NoErrorf(t, err, "cannot create the queue client - %s", err)
+	_, err = queueClient.Create(ctx, nil)
+	assert.NoErrorf(t, err, "cannot create the queue - %s", err)
 
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
@@ -143,31 +144,13 @@ func TestScaler(t *testing.T) {
 		"replica count should be 0 after 1 minute")
 
 	// test scaling
-	testScaleOut(t, kc, messageURL)
-	testScaleIn(t, kc, messageURL)
+	testScaleOut(ctx, t, kc, queueClient)
+	testScaleIn(ctx, t, kc, queueClient)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupQueue(t, queueURL)
-}
-
-func createQueue(t *testing.T) (azqueue.QueueURL, azqueue.MessagesURL) {
-	// Create Queue
-	credential, endpoint, err := azure.ParseAzureStorageQueueConnection(
-		context.Background(), kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone},
-		connectionString, "", "")
-	assert.NoErrorf(t, err, "cannot parse storage connection string - %s", err)
-
-	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})
-	serviceURL := azqueue.NewServiceURL(*endpoint, p)
-	queueURL := serviceURL.NewQueueURL(queueName)
-
-	_, err = queueURL.Create(context.Background(), azqueue.Metadata{})
-	assert.NoErrorf(t, err, "cannot create storage queue - %s", err)
-
-	messageURL := queueURL.NewMessagesURL()
-
-	return queueURL, messageURL
+	_, err = queueClient.Delete(ctx, nil)
+	assert.NoErrorf(t, err, "cannot delete the queue - %s", err)
 }
 
 func getTemplateData() (templateData, []Template) {
@@ -189,29 +172,24 @@ func getTemplateData() (templateData, []Template) {
 	}
 }
 
-func testScaleOut(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleOut(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale out ---")
 	for i := 0; i < 5; i++ {
 		msg := fmt.Sprintf("Message - %d", i)
-		_, err := messageURL.Enqueue(context.Background(), msg, 0*time.Second, time.Hour)
+		_, err := client.EnqueueMessage(ctx, msg, nil)
 		assert.NoErrorf(t, err, "cannot enqueue message - %s", err)
+		t.Logf("Message queued")
 	}
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
 		"replica count should be 1 after 1 minute")
 }
 
-func testScaleIn(t *testing.T, kc *kubernetes.Clientset, messageURL azqueue.MessagesURL) {
+func testScaleIn(ctx context.Context, t *testing.T, kc *kubernetes.Clientset, client *azqueue.QueueClient) {
 	t.Log("--- testing scale in ---")
-	_, err := messageURL.Clear(context.Background())
+	_, err := client.ClearMessages(ctx, nil)
 	assert.NoErrorf(t, err, "cannot clear queue - %s", err)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 1),
 		"replica count should be 0 after 1 minute")
 }
-
-func cleanupQueue(t *testing.T, queueURL azqueue.QueueURL) {
-	t.Log("--- cleaning up ---")
-	_, err := queueURL.Delete(context.Background())
-	assert.NoErrorf(t, err, "cannot delete storage queue - %s", err)
-}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/.gitignore b/vendor/github.com/Azure/azure-amqp-common-go/v4/.gitignore
deleted file mode 100644
index c805fdabe45..00000000000
--- a/vendor/github.com/Azure/azure-amqp-common-go/v4/.gitignore
+++ /dev/null
@@ -1,19 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
-
-vendor
-.idea
-
-.DS_Store
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/Makefile b/vendor/github.com/Azure/azure-amqp-common-go/v4/Makefile
deleted file mode 100644
index 371bf356d8c..00000000000
--- a/vendor/github.com/Azure/azure-amqp-common-go/v4/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-PACKAGE  = github.com/Azure/azure-amqp-common-go
-DATE    ?= $(shell date +%FT%T%z)
-VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
-			cat $(CURDIR)/.version 2> /dev/null || echo v0)
-BIN      = $(GOPATH)/bin
-BASE     = $(CURDIR)
-PKGS     = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -vE "^$(PACKAGE)/_examples|templates/"))
-TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS))
-GO_FILES = find . -iname '*.go' -type f
-
-GO      = go
-GODOC   = godoc
-GOFMT   = gofmt
-GOCYCLO = gocyclo
-
-V = 0
-Q = $(if $(filter 1,$V),,@)
-M = $(shell printf "\033[34;1m▶\033[0m")
-TIMEOUT = 360
-
-.PHONY: all
-all: fmt go.sum lint vet tidy | $(BASE) ; $(info $(M) building library…) @ ## Build program
-	$Q cd $(BASE) && $(GO) build \
-		-tags release \
-		-ldflags '-X $(PACKAGE)/cmd.Version=$(VERSION) -X $(PACKAGE)/cmd.BuildDate=$(DATE)' \
-		./...
-
-$(BASE): ; $(info $(M) setting GOPATH…)
-	@mkdir -p $(dir $@)
-	@ln -sf $(CURDIR) $@
-
-# Tools
-
-GOLINT = $(BIN)/golint
-$(BIN)/golint: | $(BASE) ; $(info $(M) building golint…)
-	$Q go get -u golang.org/x/lint/golint
-
-.PHONY: tidy
-tidy: ; $(info $(M) running tidy…) @ ## Run tidy
-	$Q $(GO) mod tidy
-
-# Tests
-
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-debug
-.PHONY: $(TEST_TARGETS) test-xml check test tests
-test-bench:   ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks
-test-short:   ARGS=-short        ## Run only short tests
-test-verbose: ARGS=-v            ## Run tests in verbose mode
-test-debug:   ARGS=-v -debug     ## Run tests in verbose mode with debug output
-test-race:    ARGS=-race         ## Run tests with race detector
-test-cover:   ARGS=-cover        ## Run tests in verbose mode with coverage
-$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%)
-$(TEST_TARGETS): test
-check test tests: cyclo lint vet go.sum | $(BASE) ; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests
-	$Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS)
-
-.PHONY: vet
-vet: go.sum | $(BASE) $(GOLINT) ; $(info $(M) running vet…) @ ## Run vet
-	$Q cd $(BASE) && $(GO) vet ./...
-
-.PHONY: lint
-lint: go.sum | $(BASE) $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint
-	$Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \
-		test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \
-	 done ; exit $$ret
-
-.PHONY: fmt
-fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files
-	@ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./...); do \
-		$(GOFMT) -l -w $$d/*.go || ret=$$? ; \
-	 done ; exit $$ret
-
-.PHONY: cyclo
-cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files
-	$Q cd $(BASE) && $(GOCYCLO) -over 19 $$($(GO_FILES))
-# Dependency management
-
-go.sum: go.mod ; $(info $(M) verifying modules...) @ ## Run go mod verify
-	$Q cd $(BASE) && $(GO) mod verify
-
-go.mod:
-	$Q cd $(BASE) && $(GO) mod tidy
-
-# Misc
-
-.PHONY: clean
-clean: ; $(info $(M) cleaning…)	@ ## Cleanup everything
-	@rm -rf test/tests.* test/coverage.*
-
-.PHONY: help
-help:
-	@grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
-		awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
-
-.PHONY: version
-version:
-	@echo $(VERSION)
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/README.md b/vendor/github.com/Azure/azure-amqp-common-go/v4/README.md
deleted file mode 100644
index ed6da7d81dc..00000000000
--- a/vendor/github.com/Azure/azure-amqp-common-go/v4/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Azure AMQP Common
-[![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-amqp-common-go)](https://goreportcard.com/report/github.com/Azure/azure-amqp-common-go)
-[![godoc](https://godoc.org/github.com/Azure/azure-amqp-common-go?status.svg)](https://godoc.org/github.com/Azure/azure-amqp-common-go)
-[![Build Status](https://travis-ci.org/Azure/azure-amqp-common-go.svg?branch=master)](https://travis-ci.org/Azure/azure-amqp-common-go)
-
-This project contains reusable components for AMQP based services like Event Hub and Service Bus. You will find
-abstractions over authentication, claims-based security, connection string parsing and RPC for AMQP.
-
-If you are looking for the Azure Event Hub library for go, you can find it [here](https://aka.ms/azure-event-hubs-go).
-
-If you are looking for the Azure Service Bus library for go, you can find it [here](https://aka.ms/azure-service-bus-go).
-
-## Install with Go modules
-If you want to use stable versions of the library, please use Go modules.
-
-**NOTE**: versions prior to 3.0.0 depend on pack.ag/amqp which is no longer maintained.
-Any new code should not use versions prior to 3.0.0.
-
-### Using go get targeting version 4.x.x
-``` bash
-go get github.com/Azure/azure-amqp-common-go/v4
-```
-
-### Using go get targeting version 3.x.x
-``` bash
-go get github.com/Azure/azure-amqp-common-go/v3
-```
-
-## Contributing
-
-This project welcomes contributions and suggestions. Most contributions require you to agree to a
-Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
-the rights to use your contribution. For details, visit https://cla.microsoft.com.
-
-When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
-a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
-provided by the bot. You will only need to do this once across all repos using our CLA.
-
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-
-## License
-
-MIT, see [LICENSE](./LICENSE).
-
-## Contribute
-
-See [CONTRIBUTING.md](.github/CONTRIBUTING.md).
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/SECURITY.md b/vendor/github.com/Azure/azure-amqp-common-go/v4/SECURITY.md
deleted file mode 100644
index e138ec5d6a7..00000000000
--- a/vendor/github.com/Azure/azure-amqp-common-go/v4/SECURITY.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-## Security
-
-Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
-
-If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
-
-## Reporting Security Issues
-
-**Please do not report security vulnerabilities through public GitHub issues.**
-
-Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
-
-If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
-
-You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
-
-Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
-
-  * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
-  * Full paths of source file(s) related to the manifestation of the issue
-  * The location of the affected source code (tag/branch/commit or direct URL)
-  * Any special configuration required to reproduce the issue
-  * Step-by-step instructions to reproduce the issue
-  * Proof-of-concept or exploit code (if possible)
-  * Impact of the issue, including how an attacker might exploit the issue
-
-This information will help us triage your report more quickly.
-
-If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
-
-## Preferred Languages
-
-We prefer all communications to be in English.
-
-## Policy
-
-Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
-
-
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/aad/jwt.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/aad/jwt.go
deleted file mode 100644
index 2745158ce84..00000000000
--- a/vendor/github.com/Azure/azure-amqp-common-go/v4/aad/jwt.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Package aad provides an implementation of an Azure Active Directory JWT provider which implements TokenProvider
-// from package auth for use with Azure Event Hubs and Service Bus.
-package aad
-
-// MIT License
-//
-// Copyright (c) Microsoft Corporation. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "crypto/rsa" - "crypto/x509" - "fmt" - "os" - "strconv" - "time" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "golang.org/x/crypto/pkcs12" - - "github.com/Azure/azure-amqp-common-go/v4/auth" -) - -const ( - eventhubResourceURI = "https://eventhubs.azure.net/" -) - -type ( - // TokenProviderConfiguration provides configuration parameters for building JWT AAD providers - TokenProviderConfiguration struct { - TenantID string - ClientID string - ClientSecret string - CertificatePath string - CertificatePassword string - ResourceURI string - aadToken *adal.ServicePrincipalToken - Env *azure.Environment - } - - // TokenProvider provides cbs.TokenProvider functionality for Azure Active Directory JWTs - TokenProvider struct { - tokenProvider *adal.ServicePrincipalToken - } - - // JWTProviderOption provides configuration options for constructing AAD Token Providers - JWTProviderOption func(provider *TokenProviderConfiguration) error -) - -// JWTProviderWithAzureEnvironment configures the token provider to use a specific Azure Environment -func JWTProviderWithAzureEnvironment(env *azure.Environment) JWTProviderOption { - return func(config *TokenProviderConfiguration) error { - config.Env = env - return nil - } -} - -// JWTProviderWithEnvironmentVars configures the TokenProvider using the environment variables available -// -// 1. Client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and -// "AZURE_CLIENT_SECRET" -// -// 2. Client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID", -// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD" -// -// 3. Managed Service Identity (MSI): attempt to authenticate via MSI -// -// The Azure Environment used can be specified using the name of the Azure Environment set in "AZURE_ENVIRONMENT" var. 
-func JWTProviderWithEnvironmentVars() JWTProviderOption { - return func(config *TokenProviderConfiguration) error { - config.TenantID = os.Getenv("AZURE_TENANT_ID") - config.ClientID = os.Getenv("AZURE_CLIENT_ID") - config.ClientSecret = os.Getenv("AZURE_CLIENT_SECRET") - config.CertificatePath = os.Getenv("AZURE_CERTIFICATE_PATH") - config.CertificatePassword = os.Getenv("AZURE_CERTIFICATE_PASSWORD") - - if config.Env == nil { - env, err := azureEnvFromEnvironment() - if err != nil { - return err - } - config.Env = env - } - return nil - } -} - -// JWTProviderWithResourceURI configures the token provider to use a specific eventhubResourceURI URI -func JWTProviderWithResourceURI(resourceURI string) JWTProviderOption { - return func(config *TokenProviderConfiguration) error { - config.ResourceURI = resourceURI - return nil - } -} - -// JWTProviderWithAADToken configures the token provider to use a specific Azure Active Directory Service Principal token -func JWTProviderWithAADToken(aadToken *adal.ServicePrincipalToken) JWTProviderOption { - return func(config *TokenProviderConfiguration) error { - config.aadToken = aadToken - return nil - } -} - -// NewJWTProvider builds an Azure Active Directory claims-based security token provider -func NewJWTProvider(opts ...JWTProviderOption) (*TokenProvider, error) { - config := &TokenProviderConfiguration{ - ResourceURI: eventhubResourceURI, - } - - for _, opt := range opts { - err := opt(config) - if err != nil { - return nil, err - } - } - - if config.aadToken == nil { - spToken, err := config.NewServicePrincipalToken() - if err != nil { - return nil, err - } - config.aadToken = spToken - } - return &TokenProvider{tokenProvider: config.aadToken}, nil -} - -// NewServicePrincipalToken creates a new Azure Active Directory Service Principal token provider -func (c *TokenProviderConfiguration) NewServicePrincipalToken() (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(c.Env.ActiveDirectoryEndpoint, c.TenantID) - if err != nil { - return nil, err - } - - // 1.Client Credentials - if c.ClientSecret != "" { - spToken, err := adal.NewServicePrincipalToken(*oauthConfig, c.ClientID, c.ClientSecret, c.ResourceURI) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err) - } - if err := spToken.Refresh(); err != nil { - return nil, fmt.Errorf("failed to refresh token: %v", spToken) - } - return spToken, nil - } - - // 2. Client Certificate - if c.CertificatePath != "" { - certData, err := os.ReadFile(c.CertificatePath) - if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", c.CertificatePath, err) - } - certificate, rsaPrivateKey, err := decodePkcs12(certData, c.CertificatePassword) - if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) - } - spToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, c.ClientID, certificate, rsaPrivateKey, c.ResourceURI) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) - } - if err := spToken.Refresh(); err != nil { - return nil, fmt.Errorf("failed to refresh token: %v", err) - } - return spToken, nil - } - - // 3. 
By default return MSI - msiEndpoint, err := adal.GetMSIVMEndpoint() - if err != nil { - return nil, err - } - spToken, err := c.getMSIToken(msiEndpoint) - if err != nil { - return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) - } - if err := spToken.Refresh(); err != nil { - return nil, fmt.Errorf("failed to refresh token: %v", spToken) - } - return spToken, nil -} - -func (c *TokenProviderConfiguration) getMSIToken(msiEndpoint string) (*adal.ServicePrincipalToken, error) { - if c.ClientID == "" { - return adal.NewServicePrincipalTokenFromMSI(msiEndpoint, c.ResourceURI) - } - return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, c.ResourceURI, c.ClientID) -} - -// GetToken gets a CBS JWT -func (t *TokenProvider) GetToken(audience string) (*auth.Token, error) { - token := t.tokenProvider.Token() - expireTicks, err := strconv.ParseInt(string(token.ExpiresOn), 10, 64) - if err != nil { - return nil, err - } - expires := time.Unix(expireTicks, 0) - - if expires.Before(time.Now()) { - if err := t.tokenProvider.Refresh(); err != nil { - return nil, err - } - token = t.tokenProvider.Token() - } - - return auth.NewToken(auth.CBSTokenTypeJWT, token.AccessToken, string(token.ExpiresOn)), nil -} - -func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { - privateKey, certificate, err := pkcs12.Decode(pkcs, password) - if err != nil { - return nil, nil, err - } - - rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) - if !isRsaKey { - return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") - } - - return certificate, rsaPrivateKey, nil -} - -func azureEnvFromEnvironment() (*azure.Environment, error) { - envName := os.Getenv("AZURE_ENVIRONMENT") - - var env azure.Environment - if envName == "" { - env = azure.PublicCloud - } else { - var err error - env, err = azure.EnvironmentFromName(envName) - if err != nil { - return nil, err - } - } - return &env, nil -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/azure-pipelines.yml b/vendor/github.com/Azure/azure-amqp-common-go/v4/azure-pipelines.yml deleted file mode 100644 index eb7261095cc..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/azure-pipelines.yml +++ /dev/null @@ -1,96 +0,0 @@ -variables: - GOPATH: '$(system.defaultWorkingDirectory)/work' - sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' - GO111MODULE: 'on' - -jobs: - - job: 'azureamqpcommongo' - displayName: 'Run azure-amqp-common-go CI Checks' - - strategy: - matrix: - Linux_Go118: - vm.image: 'ubuntu-20.04' - go.version: '1.18.8' - Linux_Go119: - vm.image: 'ubuntu-20.04' - go.version: '1.19.3' - - pool: - vmImage: '$(vm.image)' - - steps: - - task: GoTool@0 - inputs: - version: '$(go.version)' - displayName: "Select Go Version" - - - script: | - set -e - mkdir -p '$(GOPATH)/bin' - mkdir -p '$(sdkPath)' - shopt -s extglob - mv !(work) '$(sdkPath)' - echo '##vso[task.prependpath]$(GOPATH)/bin' - go version - displayName: 'Create Go Workspace' - - - script: | - set -e - go install github.com/jstemmer/go-junit-report@v0.9.1 - go install github.com/axw/gocov/gocov@v1.1.0 - go install github.com/AlekSi/gocov-xml@v1.0.0 - go install github.com/matm/gocov-html@v0.0.0-20200509184451-71874e2e203b - go install github.com/fzipp/gocyclo/cmd/gocyclo@v0.6.0 - workingDirectory: '$(sdkPath)' - displayName: 'Install Dependencies' - - - script: | - go build -v ./... - workingDirectory: '$(sdkPath)' - displayName: 'Build' - - - script: | - go vet ./... 
- workingDirectory: '$(sdkPath)' - displayName: 'Vet' - - - script: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.50.1 - golangci-lint --version - golangci-lint run - workingDirectory: '$(sdkPath)' - displayName: 'Lint' - - - script: | - gocyclo -over 19 . - workingDirectory: '$(sdkPath)' - displayName: 'Cyclo' - - - script: | - set -e - go test -race -v -coverprofile=coverage.txt -covermode atomic ./... 2>&1 | go-junit-report > report.xml - gocov convert coverage.txt > coverage.json - gocov-xml < coverage.json > coverage.xml - gocov-html < coverage.json > coverage.html - workingDirectory: '$(sdkPath)' - displayName: 'Run Tests' - - - script: | - gofmt -s -l -w . >&2 - workingDirectory: '$(sdkPath)' - displayName: 'Format Check' - failOnStderr: true - condition: succeededOrFailed() - - - task: PublishTestResults@2 - inputs: - testRunner: JUnit - testResultsFiles: $(sdkPath)/report.xml - failTaskOnFailedTests: true - - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - summaryFileLocation: $(sdkPath)/coverage.xml - additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/cbs/cbs.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/cbs/cbs.go deleted file mode 100644 index 48eebcb882e..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/cbs/cbs.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package cbs provides the functionality for negotiating claims-based security over AMQP for use in Azure Service Bus -// and Event Hubs. -package cbs - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "context" - "fmt" - "time" - - "github.com/devigned/tab" - - "github.com/Azure/azure-amqp-common-go/v4/auth" - "github.com/Azure/azure-amqp-common-go/v4/internal/tracing" - "github.com/Azure/azure-amqp-common-go/v4/rpc" - "github.com/Azure/go-amqp" -) - -const ( - cbsAddress = "$cbs" - cbsOperationKey = "operation" - cbsOperationPutToken = "put-token" - cbsTokenTypeKey = "type" - cbsAudienceKey = "name" - cbsExpirationKey = "expiration" -) - -// NegotiateClaim attempts to put a token to the $cbs management endpoint to negotiate auth for the given audience -func NegotiateClaim(ctx context.Context, audience string, conn *amqp.Conn, provider auth.TokenProvider) error { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.cbs.NegotiateClaim") - defer span.End() - - link, err := rpc.NewLink(ctx, conn, cbsAddress) - if err != nil { - tab.For(ctx).Error(err) - return err - } - defer func() { - if err := link.Close(ctx); err != nil { - tab.For(ctx).Error(err) - } - }() - - token, err := provider.GetToken(audience) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - tab.For(ctx).Debug(fmt.Sprintf("negotiating claim for audience %s with token type %s and expiry of %s", audience, token.TokenType, token.Expiry)) - msg := &amqp.Message{ - Value: token.Token, - ApplicationProperties: map[string]interface{}{ - cbsOperationKey: cbsOperationPutToken, - cbsTokenTypeKey: string(token.TokenType), - cbsAudienceKey: audience, - cbsExpirationKey: token.Expiry, - }, - } - - res, err := link.RetryableRPC(ctx, 3, 1*time.Second, msg) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - tab.For(ctx).Debug(fmt.Sprintf("negotiated with response code %d and message: %s", res.Code, res.Description)) - return nil -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/changelog.md b/vendor/github.com/Azure/azure-amqp-common-go/v4/changelog.md deleted file mode 100644 index e9cc595b874..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/changelog.md +++ /dev/null @@ -1,122 +0,0 @@ -# Change Log - -## `v4.2.0` -- Update to the GA verison of go-amqp - -## `v4.1.0` -- Update to the latest go-amqp - [PR#72](https://github.com/Azure/azure-amqp-common-go/pull/72) - -## `v4.0.0` -- Updated to the latest go-amqp which includes a few minor changes in public surface area. - [PR#68](https://github.com/Azure/azure-amqp-common-go/pull/68) - -## `v3.2.0` -- Change the default credits for the RPC link to be more reasonable (1000) - [PR#54](https://github.com/Azure/azure-amqp-common-go/pull/54) - -## `v3.1.2` -- Fixing a potential race condition when an RPC link is shut down while still sending requests - or handling responses. - [PR#55](https://github.com/Azure/azure-amqp-common-go/pull/55) -- Upgrading to go-amqp v0.13.13, which fixes an issue with simultaneous settling on the rpc link. - -## `v3.1.1` -- Change `Link` so it can handle parallel requests. - [PR#52](https://github.com/Azure/azure-amqp-common-go/pull/52) - -## `v3.1.0` -- Add support for passing managed identity user-assigned client ID. 
- -## `v3.0.1` -- add context to message deposition methods -- update dependencies - -## `v3.0.0` -- switch from pack.ag/amqp to github.com/Azure/go-amqp -- bump major version - -## `v2.1.1` -- bump amqp to v0.12.1 -- bump azure sdk for go to v32.5.0 -- bump go-autorest - -## `v2.1.0` -- add session filters for RPC links -- bump amqp to v0.11.2 -- add more logging in RPC operations - -## `v2.0.0` -- [**breaking change** remove persist and move into the Event Hubs package](https://github.com/Azure/azure-event-hubs-go/pull/112) -- **breaking change** remove log package in favor of https://github.com/devigned/tab - -## `v1.1.4` -- allow status description on RPC calls to be empty without returning an error https://github.com/Azure/azure-event-hubs-go/issues/88 - -## `v1.1.3` -- adding automatic server-timeout field for `rpc` package. It gleans the appropriate value from the context passed to it - -## `v1.1.2` -- adopting go modules - -## `v1.1.1` -- broadening accepted versions of pack.ag/amqp - -## `v1.1.0` - -- adding the ability to reuse an AMQP session while making RPCs -- bug fixes - -## `v1.0.3` -- updating dependencies, adding new 'go-autorest' constraint - -## `v1.0.2` -- adding resiliency against malformed "status-code" and "status-description" properties in rpc responses - -## `v1.0.1` -- bump version constant - -## `v1.0.0` -- moved to opencensus from opentracing -- committing to backward compatibility - -## `v0.7.0` -- update AMQP dependency to 0.7.0 - -## `v0.6.0` -- **Breaking Change** change the parse connection signature and make it more strict -- fix errors imports - -## `v0.5.0` -- **Breaking Change** lock dependency to AMQP - -## `v0.4.0` -- **Breaking Change** remove namespace from SAS provider and return struct rather than interface - -## `v0.3.2` -- Return error on retry. Was returning nil if not retryable. - -## `v0.3.1` -- Fix missing defer on spans - -## `v0.3.0` -- add opentracing support -- upgrade amqp to pull in the changes where close accepts context (breaking change) - -## `v0.2.4` -- connection string keys are case insensitive - -## `v0.2.3` -- handle remove trailing slash from host - -## `v0.2.2` -- handle connection string values which contain `=` - -## `v0.2.1` -- parse connection strings using key / values rather than regex - -## `v0.2.0` -- add file checkpoint persister - -## `v0.1.0` -- initial release \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/conn/conn.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/conn/conn.go deleted file mode 100644 index 4d539cf910e..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/conn/conn.go +++ /dev/null @@ -1,112 +0,0 @@ -package conn - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "errors" - "fmt" - "net/url" - "strings" -) - -const ( - endpointKey = "Endpoint" - sharedAccessKeyNameKey = "SharedAccessKeyName" - sharedAccessKeyKey = "SharedAccessKey" - entityPathKey = "EntityPath" -) - -type ( - // ParsedConn is the structure of a parsed Service Bus or Event Hub connection string. - ParsedConn struct { - Host string - Suffix string - Namespace string - HubName string - KeyName string - Key string - } -) - -// newParsedConnection is a constructor for a parsedConn and verifies each of the inputs is non-null. -func newParsedConnection(namespace, suffix, hubName, keyName, key string) *ParsedConn { - return &ParsedConn{ - Host: "amqps://" + namespace + "." + suffix, - Suffix: suffix, - Namespace: namespace, - KeyName: keyName, - Key: key, - HubName: hubName, - } -} - -// ParsedConnectionFromStr takes a string connection string from the Azure portal and returns the parsed representation. -// The method will return an error if the Endpoint, SharedAccessKeyName or SharedAccessKey is empty. -func ParsedConnectionFromStr(connStr string) (*ParsedConn, error) { - var namespace, suffix, hubName, keyName, secret string - splits := strings.Split(connStr, ";") - for _, split := range splits { - keyAndValue := strings.Split(split, "=") - if len(keyAndValue) < 2 { - return nil, errors.New("failed parsing connection string due to unmatched key value separated by '='") - } - - // if a key value pair has `=` in the value, recombine them - key := keyAndValue[0] - value := strings.Join(keyAndValue[1:], "=") - switch { - case strings.EqualFold(endpointKey, key): - u, err := url.Parse(value) - if err != nil { - return nil, errors.New("failed parsing connection string due to an incorrectly formatted Endpoint value") - } - hostSplits := strings.Split(u.Host, ".") - if len(hostSplits) < 2 { - return nil, errors.New("failed parsing connection string due to Endpoint value not containing a URL with a namespace and a suffix") - } - namespace = hostSplits[0] - suffix = strings.Join(hostSplits[1:], ".") - case strings.EqualFold(sharedAccessKeyNameKey, key): - keyName = value - case strings.EqualFold(sharedAccessKeyKey, key): - secret = value - case strings.EqualFold(entityPathKey, key): - hubName = value - } - } - - parsed := newParsedConnection(namespace, suffix, hubName, keyName, secret) - if namespace == "" { - return parsed, fmt.Errorf("key %q must not be empty", endpointKey) - } - - if keyName == "" { - return parsed, fmt.Errorf("key %q must not be empty", sharedAccessKeyNameKey) - } - - if secret == "" { - return parsed, fmt.Errorf("key %q must not be empty", sharedAccessKeyKey) - } - - return parsed, nil -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/tracing/tracing.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/tracing/tracing.go deleted file mode 100644 index 6c4c3e2f6e4..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/tracing/tracing.go +++ /dev/null @@ -1,32 +0,0 @@ -package tracing - -import ( - "context" - "os" - - 
"github.com/devigned/tab" - - "github.com/Azure/azure-amqp-common-go/v4/internal" -) - -// StartSpanFromContext starts a span given a context and applies common library information -func StartSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) { - ctx, span := tab.StartSpan(ctx, operationName) - ApplyComponentInfo(span) - return ctx, span -} - -// ApplyComponentInfo applies eventhub library and network info to the span -func ApplyComponentInfo(span tab.Spanner) { - span.AddAttributes( - tab.StringAttribute("component", "github.com/Azure/azure-amqp-common-go"), - tab.StringAttribute("version", common.Version)) - applyNetworkInfo(span) -} - -func applyNetworkInfo(span tab.Spanner) { - hostname, err := os.Hostname() - if err == nil { - span.AddAttributes(tab.StringAttribute("peer.hostname", hostname)) - } -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/version.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/version.go deleted file mode 100644 index a860e17c1da..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/internal/version.go +++ /dev/null @@ -1,6 +0,0 @@ -package common - -const ( - // Version is the semantic version of the library - Version = "4.2.0" -) diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/ptrs.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/ptrs.go deleted file mode 100644 index 17327900d8e..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/ptrs.go +++ /dev/null @@ -1,44 +0,0 @@ -package common - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -// PtrBool takes a boolean and returns a pointer to that bool. For use in literal pointers, ptrBool(true) -> *bool -func PtrBool(toPtr bool) *bool { - return &toPtr -} - -// PtrString takes a string and returns a pointer to that string. For use in literal pointers, -// PtrString(fmt.Sprintf("..", foo)) -> *string -func PtrString(toPtr string) *string { - return &toPtr -} - -// PtrInt32 takes a int32 and returns a pointer to that int32. For use in literal pointers, ptrInt32(1) -> *int32 -func PtrInt32(number int32) *int32 { - return &number -} - -// PtrInt64 takes a int64 and returns a pointer to that int64. 
For use in literal pointers, ptrInt64(1) -> *int64 -func PtrInt64(number int64) *int64 { - return &number -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/retry.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/retry.go deleted file mode 100644 index 3d9edc1af0f..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/retry.go +++ /dev/null @@ -1,54 +0,0 @@ -package common - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "time" -) - -// Retryable represents an error which should be able to be retried -type Retryable string - -// Error implementation for Retryable -func (r Retryable) Error() string { - return string(r) -} - -// Retry will attempt to retry an action a number of times if the action returns a retryable error -func Retry(times int, delay time.Duration, action func() (interface{}, error)) (interface{}, error) { - var lastErr error - for i := 0; i < times; i++ { - item, err := action() - if err != nil { - if retryable, ok := err.(Retryable); ok { - lastErr = retryable - time.Sleep(delay) - continue - } else { - return nil, err - } - } - return item, nil - } - return nil, lastErr -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/rpc/rpc.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/rpc/rpc.go deleted file mode 100644 index 0e098b12155..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/rpc/rpc.go +++ /dev/null @@ -1,506 +0,0 @@ -// Package rpc provides functionality for request / reply messaging. It is used by package mgmt and cbs. -package rpc - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/devigned/tab" - - common "github.com/Azure/azure-amqp-common-go/v4" - "github.com/Azure/azure-amqp-common-go/v4/internal/tracing" - "github.com/Azure/azure-amqp-common-go/v4/uuid" - "github.com/Azure/go-amqp" -) - -const ( - replyPostfix = "-reply-to-" - statusCodeKey = "status-code" - descriptionKey = "status-description" - defaultReceiverCredits = 1000 -) - -type ( - // Link is the bidirectional communication structure used for CBS negotiation - Link struct { - session *amqp.Session - - receiver amqpReceiver // *amqp.Receiver - sender amqpSender // *amqp.Sender - - clientAddress string - sessionID *string - useSessionID bool - id string - - responseMu sync.Mutex - startResponseRouterOnce *sync.Once - responseMap map[string]chan rpcResponse - - // for unit tests - uuidNewV4 func() (uuid.UUID, error) - messageAccept func(ctx context.Context, message *amqp.Message) error - } - - // Response is the simplified response structure from an RPC like call - Response struct { - Code int - Description string - Message *amqp.Message - } - - // LinkOption provides a way to customize the construction of a Link - LinkOption func(link *Link) error - - rpcResponse struct { - message *amqp.Message - err error - } - - // Actually: *amqp.Receiver - amqpReceiver interface { - Receive(ctx context.Context, o *amqp.ReceiveOptions) (*amqp.Message, error) - Close(ctx context.Context) error - } - - amqpSender interface { - Send(ctx context.Context, msg *amqp.Message, o *amqp.SendOptions) error - Close(ctx context.Context) error - } -) - -// LinkWithSessionFilter configures a Link to use a session filter -func LinkWithSessionFilter(sessionID *string) LinkOption { - return func(l *Link) error { - l.sessionID = sessionID - l.useSessionID = true - return nil - } -} - -// NewLink will build a new request response link -func NewLink(ctx context.Context, conn *amqp.Conn, address string, opts ...LinkOption) (*Link, error) { - authSession, err := conn.NewSession(ctx, nil) - if err != nil { - return nil, err - } - - return NewLinkWithSession(ctx, authSession, address, opts...) 
-} - -// NewLinkWithSession will build a new request response link, but will reuse an existing AMQP session -func NewLinkWithSession(ctx context.Context, session *amqp.Session, address string, opts ...LinkOption) (*Link, error) { - linkID, err := uuid.NewV4() - if err != nil { - return nil, err - } - - id := linkID.String() - link := &Link{ - session: session, - clientAddress: strings.Replace("$", "", address, -1) + replyPostfix + id, - id: id, - - uuidNewV4: uuid.NewV4, - responseMap: map[string]chan rpcResponse{}, - startResponseRouterOnce: &sync.Once{}, - } - - for _, opt := range opts { - if err := opt(link); err != nil { - return nil, err - } - } - - sender, err := session.NewSender(ctx, address, nil) - if err != nil { - return nil, err - } - - receiverOpts := amqp.ReceiverOptions{ - Credit: defaultReceiverCredits, - TargetAddress: link.clientAddress, - } - - if link.sessionID != nil { - const name = "com.microsoft:session-filter" - const code = uint64(0x00000137000000C) - if link.sessionID == nil { - receiverOpts.Filters = append(receiverOpts.Filters, amqp.NewLinkFilter(name, code, nil)) - } else { - receiverOpts.Filters = append(receiverOpts.Filters, amqp.NewLinkFilter(name, code, link.sessionID)) - } - } - - receiver, err := session.NewReceiver(ctx, address, &receiverOpts) - if err != nil { - // make sure we close the sender - clsCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - _ = sender.Close(clsCtx) - return nil, err - } - - link.sender = sender - link.receiver = receiver - link.messageAccept = receiver.AcceptMessage - - return link, nil -} - -// RetryableRPC attempts to retry a request a number of times with delay -func (l *Link) RetryableRPC(ctx context.Context, times int, delay time.Duration, msg *amqp.Message) (*Response, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RetryableRPC") - defer span.End() - - res, err := common.Retry(times, delay, func() (interface{}, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RetryableRPC.retry") - defer span.End() - - res, err := l.RPC(ctx, msg) - - if err != nil { - tab.For(ctx).Error(fmt.Errorf("error in RPC via link %s: %v", l.id, err)) - return nil, err - } - - switch { - case res.Code >= 200 && res.Code < 300: - tab.For(ctx).Debug(fmt.Sprintf("successful rpc on link %s: status code %d and description: %s", l.id, res.Code, res.Description)) - return res, nil - case res.Code >= 500: - errMessage := fmt.Sprintf("server error link %s: status code %d and description: %s", l.id, res.Code, res.Description) - tab.For(ctx).Error(errors.New(errMessage)) - return nil, common.Retryable(errMessage) - default: - errMessage := fmt.Sprintf("unhandled error link %s: status code %d and description: %s", l.id, res.Code, res.Description) - tab.For(ctx).Error(errors.New(errMessage)) - return nil, common.Retryable(errMessage) - } - }) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - return res.(*Response), nil -} - -// startResponseRouter is responsible for taking any messages received on the 'response' -// link and forwarding it to the proper channel. The channel is being select'd by the -// original `RPC` call. 
-func (l *Link) startResponseRouter() { - for { - res, err := l.receiver.Receive(context.Background(), nil) - - // You'll see this when the link is shutting down (either - // service-initiated via 'detach' or a user-initiated shutdown) - if isClosedError(err) { - l.broadcastError(err) - break - } else if err != nil { - // this is some transient error, sleep before trying again - time.Sleep(time.Second) - } - - // I don't believe this should happen. The JS version of this same code - // ignores errors as well since responses should always be correlated - // to actual send requests. So this is just here for completeness. - if res == nil { - continue - } - - autogenMessageId, ok := res.Properties.CorrelationID.(string) - - if !ok { - // TODO: it'd be good to track these in some way. We don't have a good way to - // forward this on at this point. - continue - } - - ch := l.deleteChannelFromMap(autogenMessageId) - - if ch != nil { - ch <- rpcResponse{message: res, err: err} - } - } -} - -// RPC sends a request and waits on a response for that request -func (l *Link) RPC(ctx context.Context, msg *amqp.Message) (*Response, error) { - l.startResponseRouterOnce.Do(func() { - go l.startResponseRouter() - }) - - copiedMessage, messageID, err := addMessageID(msg, l.uuidNewV4) - - if err != nil { - return nil, err - } - - // use the copiedMessage from this point - msg = copiedMessage - - const altStatusCodeKey, altDescriptionKey = "statusCode", "statusDescription" - - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RPC") - defer span.End() - - msg.Properties.ReplyTo = &l.clientAddress - - if msg.ApplicationProperties == nil { - msg.ApplicationProperties = make(map[string]interface{}) - } - - if _, ok := msg.ApplicationProperties["server-timeout"]; !ok { - if deadline, ok := ctx.Deadline(); ok { - msg.ApplicationProperties["server-timeout"] = uint(time.Until(deadline) / time.Millisecond) - } - } - - responseCh := l.addChannelToMap(messageID) - - if responseCh == nil { - return nil, &amqp.LinkError{} - } - - err = l.sender.Send(ctx, msg, nil) - - if err != nil { - l.deleteChannelFromMap(messageID) - tab.For(ctx).Error(err) - return nil, err - } - - var res *amqp.Message - - select { - case <-ctx.Done(): - l.deleteChannelFromMap(messageID) - res, err = nil, ctx.Err() - case resp := <-responseCh: - // this will get triggered by the loop in 'startReceiverRouter' when it receives - // a message with our autoGenMessageID set in the correlation_id property. 
- res, err = resp.message, resp.err - } - - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - var statusCode int - statusCodeCandidates := []string{statusCodeKey, altStatusCodeKey} - for i := range statusCodeCandidates { - if rawStatusCode, ok := res.ApplicationProperties[statusCodeCandidates[i]]; ok { - if cast, ok := rawStatusCode.(int32); ok { - statusCode = int(cast) - break - } else { - err := errors.New("status code was not of expected type int32") - tab.For(ctx).Error(err) - return nil, err - } - } - } - if statusCode == 0 { - err := errors.New("status codes was not found on rpc message") - tab.For(ctx).Error(err) - return nil, err - } - - var description string - descriptionCandidates := []string{descriptionKey, altDescriptionKey} - for i := range descriptionCandidates { - if rawDescription, ok := res.ApplicationProperties[descriptionCandidates[i]]; ok { - if description, ok = rawDescription.(string); ok || rawDescription == nil { - break - } else { - return nil, errors.New("status description was not of expected type string") - } - } - } - - span.AddAttributes(tab.StringAttribute("http.status_code", fmt.Sprintf("%d", statusCode))) - - response := &Response{ - Code: int(statusCode), - Description: description, - Message: res, - } - - if err := l.messageAccept(ctx, res); err != nil { - tab.For(ctx).Error(err) - return response, err - } - - return response, err -} - -// Close the link receiver, sender and session -func (l *Link) Close(ctx context.Context) error { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.Close") - defer span.End() - - if err := l.closeReceiver(ctx); err != nil { - _ = l.closeSender(ctx) - _ = l.closeSession(ctx) - return err - } - - if err := l.closeSender(ctx); err != nil { - _ = l.closeSession(ctx) - return err - } - - return l.closeSession(ctx) -} - -func (l *Link) closeReceiver(ctx context.Context) error { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeReceiver") - defer span.End() - - if l.receiver != nil { - return l.receiver.Close(ctx) - } - return nil -} - -func (l *Link) closeSender(ctx context.Context) error { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeSender") - defer span.End() - - if l.sender != nil { - return l.sender.Close(ctx) - } - return nil -} - -func (l *Link) closeSession(ctx context.Context) error { - ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeSession") - defer span.End() - - if l.session != nil { - return l.session.Close(ctx) - } - return nil -} - -// addChannelToMap adds a channel which will be used by the response router to -// notify when there is a response to the request. -// If l.responseMap is nil (for instance, via broadcastError) this function will -// return nil. -func (l *Link) addChannelToMap(messageID string) chan rpcResponse { - l.responseMu.Lock() - defer l.responseMu.Unlock() - - if l.responseMap == nil { - return nil - } - - responseCh := make(chan rpcResponse, 1) - l.responseMap[messageID] = responseCh - - return responseCh -} - -// deleteChannelFromMap removes the message from our internal map and returns -// a channel that the corresponding RPC() call is waiting on. -// If l.responseMap is nil (for instance, via broadcastError) this function will -// return nil. 
-func (l *Link) deleteChannelFromMap(messageID string) chan rpcResponse { - l.responseMu.Lock() - defer l.responseMu.Unlock() - - if l.responseMap == nil { - return nil - } - - ch := l.responseMap[messageID] - delete(l.responseMap, messageID) - - return ch -} - -// broadcastError notifies the anyone waiting for a response that the link/session/connection -// has closed. -func (l *Link) broadcastError(err error) { - l.responseMu.Lock() - defer l.responseMu.Unlock() - - for _, ch := range l.responseMap { - ch <- rpcResponse{err: err} - } - - l.responseMap = nil -} - -// addMessageID generates a unique UUID for the message. When the service -// responds it will fill out the correlation ID property of the response -// with this ID, allowing us to link the request and response together. -// -// NOTE: this function copies 'message', adding in a 'Properties' object -// if it does not already exist. -func addMessageID(message *amqp.Message, uuidNewV4 func() (uuid.UUID, error)) (*amqp.Message, string, error) { - uuid, err := uuidNewV4() - - if err != nil { - return nil, "", err - } - - autoGenMessageID := uuid.String() - - // we need to modify the message so we'll make a copy - copiedMessage := *message - - if message.Properties == nil { - copiedMessage.Properties = &amqp.MessageProperties{ - MessageID: autoGenMessageID, - } - } else { - // properties already exist, make a copy and then update - // the message ID - copiedProperties := *message.Properties - copiedProperties.MessageID = autoGenMessageID - - copiedMessage.Properties = &copiedProperties - } - - return &copiedMessage, autoGenMessageID, nil -} - -func isClosedError(err error) bool { - var connError *amqp.ConnError - var sessionError *amqp.SessionError - var linkError *amqp.LinkError - - return (errors.As(err, &linkError) && linkError.RemoteErr == nil) || - errors.As(err, &sessionError) || - errors.As(err, &connError) -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/sas/sas.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/sas/sas.go deleted file mode 100644 index d02b339d07c..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/sas/sas.go +++ /dev/null @@ -1,158 +0,0 @@ -// Package sas provides SAS token functionality which implements TokenProvider from package auth for use with Azure -// Event Hubs and Service Bus. -package sas - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-amqp-common-go/v4/auth" - "github.com/Azure/azure-amqp-common-go/v4/conn" -) - -type ( - // Signer provides SAS token generation for use in Service Bus and Event Hub - Signer struct { - KeyName string - Key string - } - - // TokenProvider is a SAS claims-based security token provider - TokenProvider struct { - signer *Signer - } - - // TokenProviderOption provides configuration options for SAS Token Providers - TokenProviderOption func(*TokenProvider) error -) - -// TokenProviderWithEnvironmentVars creates a new SAS TokenProvider from environment variables -// -// # There are two sets of environment variables which can produce a SAS TokenProvider -// -// 1) Expected Environment Variables: -// - "EVENTHUB_KEY_NAME" the name of the Event Hub key -// - "EVENTHUB_KEY_VALUE" the secret for the Event Hub key named in "EVENTHUB_KEY_NAME" -// -// 2) Expected Environment Variable: -// - "EVENTHUB_CONNECTION_STRING" connection string from the Azure portal -// -// looks like: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234= -func TokenProviderWithEnvironmentVars() TokenProviderOption { - return func(provider *TokenProvider) error { - connStr := os.Getenv("EVENTHUB_CONNECTION_STRING") - if connStr != "" { - parsed, err := conn.ParsedConnectionFromStr(connStr) - if err != nil { - return err - } - provider.signer = NewSigner(parsed.KeyName, parsed.Key) - return nil - } - - var ( - keyName = os.Getenv("EVENTHUB_KEY_NAME") - keyValue = os.Getenv("EVENTHUB_KEY_VALUE") - ) - - if keyName == "" || keyValue == "" { - return errors.New("unable to build SAS token provider because (EVENTHUB_KEY_NAME and EVENTHUB_KEY_VALUE) were empty, and EVENTHUB_CONNECTION_STRING was empty") - } - provider.signer = NewSigner(keyName, keyValue) - return nil - } -} - -// TokenProviderWithKey configures a SAS TokenProvider to use the given key name and key (secret) for signing -func TokenProviderWithKey(keyName, key string) TokenProviderOption { - return func(provider *TokenProvider) error { - provider.signer = NewSigner(keyName, key) - return nil - } -} - -// NewTokenProvider builds a SAS claims-based security token provider -func NewTokenProvider(opts ...TokenProviderOption) (*TokenProvider, error) { - provider := new(TokenProvider) - for _, opt := range opts { - err := opt(provider) - if err != nil { - return nil, err - } - } - return provider, nil -} - -// GetToken gets a CBS SAS token -func (t *TokenProvider) GetToken(audience string) (*auth.Token, error) { - signature, expiry := t.signer.SignWithDuration(audience, 2*time.Hour) - return auth.NewToken(auth.CBSTokenTypeSAS, signature, expiry), nil -} - -// NewSigner builds a new SAS signer for use in generation Service Bus and Event Hub SAS tokens -func NewSigner(keyName, key string) *Signer { - return &Signer{ - KeyName: keyName, - Key: key, - } -} - -// SignWithDuration signs a given for a period of time from now -func (s *Signer) SignWithDuration(uri string, interval time.Duration) (signature, expiry string) { - expiry = signatureExpiry(time.Now().UTC(), interval) - 
return s.SignWithExpiry(uri, expiry), expiry -} - -// SignWithExpiry signs a given uri with a given expiry string -func (s *Signer) SignWithExpiry(uri, expiry string) string { - audience := strings.ToLower(url.QueryEscape(uri)) - sts := stringToSign(audience, expiry) - sig := s.signString(sts) - return fmt.Sprintf("SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s", audience, sig, expiry, s.KeyName) -} - -func signatureExpiry(from time.Time, interval time.Duration) string { - t := from.Add(interval).Round(time.Second).Unix() - return strconv.FormatInt(t, 10) -} - -func stringToSign(uri, expiry string) string { - return uri + "\n" + expiry -} - -func (s *Signer) signString(str string) string { - h := hmac.New(sha256.New, []byte(s.Key)) - h.Write([]byte(str)) - encodedSig := base64.StdEncoding.EncodeToString(h.Sum(nil)) - return url.QueryEscape(encodedSig) -} diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v4/uuid/uuid.go b/vendor/github.com/Azure/azure-amqp-common-go/v4/uuid/uuid.go deleted file mode 100644 index 8d1f5f74145..00000000000 --- a/vendor/github.com/Azure/azure-amqp-common-go/v4/uuid/uuid.go +++ /dev/null @@ -1,72 +0,0 @@ -package uuid - -import ( - "crypto/rand" - "encoding/hex" -) - -// Size of a UUID in bytes. -const Size = 16 - -// UUID versions -const ( - _ byte = iota - _ - _ - _ - V4 - _ - - _ byte = iota - VariantRFC4122 -) - -type ( - // UUID representation compliant with specification - // described in RFC 4122. - UUID [Size]byte -) - -var ( - randomReader = rand.Reader - - // Nil is special form of UUID that is specified to have all - // 128 bits set to zero. - Nil = UUID{} -) - -// NewV4 returns random generated UUID. -func NewV4() (UUID, error) { - u := UUID{} - if _, err := randomReader.Read(u[:]); err != nil { - return Nil, err - } - u.setVersion(V4) - u.setVariant(VariantRFC4122) - - return u, nil -} - -func (u *UUID) setVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -func (u *UUID) setVariant(v byte) { - u[8] = u[8]&(0xff>>2) | (0x02 << 6) -} - -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/.gitignore b/vendor/github.com/Azure/azure-event-hubs-go/v3/.gitignore deleted file mode 100644 index 8d18833bfb5..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -vendor -.idea - -.DS_Store - -.env - -# Test Infrastructure -terraform.tfvars -*.auto.tfvars -*.tfstate -*.tfstate.backup -.terraform/ -.terraform.tfstate.lock.info \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/Makefile b/vendor/github.com/Azure/azure-event-hubs-go/v3/Makefile deleted file mode 100644 index 0e0f9194bc2..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/Makefile +++ /dev/null @@ -1,94 +0,0 @@ -PACKAGE = github.com/Azure/azure-event-hubs-go -DATE ?= $(shell date +%FT%T%z) -VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \ - cat 
$(CURDIR)/.version 2> /dev/null || echo v0) -BIN = $(GOPATH)/bin - -GO = go -GODOC = godoc -GOFMT = gofmt -GOCYCLO = gocyclo -GOLINT = $(BIN)/golangci-lint -GOSTATICCHECK = $(BIN)/staticcheck -GOJUNITRPT = go-junit-report - -V = 0 -Q = $(if $(filter 1,$V),,@) -M = $(shell printf "\033[34;1m▶\033[0m") -TIMEOUT = 720 - -.PHONY: all -all: fmt lint vet tidy build - -.PHONY: build -build: | ; $(info $(M) building library…) @ ## Build program - $Q $(GO) build all - -# Tests - -TEST_TARGETS := test-default test-bench test-verbose test-race test-debug test-cover test-full -.PHONY: $(TEST_TARGETS) test-xml check test tests -test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks -test-verbose: ARGS=-v ## Run tests in verbose mode -test-debug: ARGS=-v -debug ## Run tests in verbose mode with debug output -test-race: ARGS=-race ## Run tests with race detector -test-cover: ARGS=-cover -coverprofile=cover.out -v ## Run tests in verbose mode with coverage -test-full: ARGS=-cover -coverprofile=cover.out -v -race ## Run tests with code coverage and race detection -$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%) -$(TEST_TARGETS): test -check test tests: cyclo lint vet terraform.tfstate; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests - $(GO) test -timeout $(TIMEOUT)s $(ARGS) ./... 2>&1 | tee gotestoutput.log && \ - $(GOJUNITRPT) < gotestoutput.log > report.xml && \ - rm -f gotestoutput.log - -.PHONY: vet -vet: ; $(info $(M) running vet…) @ ## Run vet - $Q $(GO) vet ./... - -.PHONY: tidy -tidy: ; $(info $(M) running go mod tidy…) @ ## Run tidy - $Q $(GO) mod tidy - -.PHONY: lint -lint: ; $(info $(M) running golangci-lint…) @ ## Run golangci-lint - $Q $(GOLINT) run --skip-dirs "internal/azure-storage-blob-go" - -.PHONY: staticcheck -staticcheck: ; $(info $(M) running staticcheck…) @ ## Run staticcheck - $Q $(GOSTATICCHECK) ./... - -.PHONY: fmt -fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files - @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./... | grep -v /vendor/); do \ - $(GOFMT) -l -w $$d/*.go || ret=$$? ; \ - done ; exit $$ret - -.PHONY: cyclo -cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files - $Q $(GOCYCLO) -over 19 -ignore "internal/azure-storage-blob-go" . - -terraform.tfstate: azuredeploy.tf $(wildcard terraform.tfvars) .terraform ; $(info $(M) running terraform...) @ ## Run terraform to provision infrastructure needed for testing - $Q TF_VAR_azure_client_secret="$${ARM_CLIENT_SECRET}" terraform apply -auto-approve - $Q terraform output -json | jq -r 'keys[] as $$k | "\($$k) = \(.[$$k].value)"' > .env - -.terraform: - $Q terraform init - -.Phony: destroy -destroy: ; $(info $(M) running terraform destroy...) 
- $(Q) terraform destroy --auto-approve - -# Misc - -.PHONY: clean -clean: ; $(info $(M) cleaning…) @ ## Cleanup everything - @rm -rf test/tests.* test/coverage.* - -.PHONY: help -help: - @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ - awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' - -.PHONY: version -version: - @echo $(VERSION) \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/SECURITY.md b/vendor/github.com/Azure/azure-event-hubs-go/v3/SECURITY.md deleted file mode 100644 index 869fdfe2b24..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/SECURITY.md +++ /dev/null @@ -1,41 +0,0 @@ - - -## Security - -Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). - -If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. - -## Reporting Security Issues - -**Please do not report security vulnerabilities through public GitHub issues.** - -Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). - -If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). - -You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). - -Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: - - * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) - * Full paths of source file(s) related to the manifestation of the issue - * The location of the affected source code (tag/branch/commit or direct URL) - * Any special configuration required to reproduce the issue - * Step-by-step instructions to reproduce the issue - * Proof-of-concept or exploit code (if possible) - * Impact of the issue, including how an attacker might exploit the issue - -This information will help us triage your report more quickly. - -If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. - -## Preferred Languages - -We prefer all communications to be in English. - -## Policy - -Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
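For reference, the SAS scheme implemented by the sas.Signer deleted at the top of this change is compact enough to sketch standalone: the audience is the lowercased, URL-escaped resource URI; the string to sign is the audience, a newline, and the Unix-seconds expiry; and the signature is the URL-escaped, base64-encoded HMAC-SHA256 of that string under the shared key. The sketch below is illustrative only; the function names and main are stand-ins, not part of any retained API.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// sasToken reproduces the removed SignWithExpiry/signString logic:
// HMAC-SHA256 over "<audience>\n<expiry>", base64- then URL-encoded.
func sasToken(uri, keyName, key string, ttl time.Duration) string {
	expiry := strconv.FormatInt(time.Now().Add(ttl).Round(time.Second).Unix(), 10)
	audience := strings.ToLower(url.QueryEscape(uri))
	h := hmac.New(sha256.New, []byte(key))
	h.Write([]byte(audience + "\n" + expiry))
	sig := url.QueryEscape(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	return fmt.Sprintf("SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s", audience, sig, expiry, keyName)
}

func main() {
	// Placeholder inputs for illustration only.
	fmt.Println(sasToken("sb://ns.servicebus.windows.net/hub", "RootManageSharedAccessKey", "secret", time.Hour))
}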
- - diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/amqp_mgmt.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/amqp_mgmt.go deleted file mode 100644 index ddcd1d85024..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/amqp_mgmt.go +++ /dev/null @@ -1,188 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "context" - "fmt" - "time" - - "github.com/Azure/azure-amqp-common-go/v4/rpc" - "github.com/Azure/go-amqp" - "github.com/devigned/tab" - "github.com/mitchellh/mapstructure" -) - -const ( - // MsftVendor is the Microsoft vendor identifier - MsftVendor = "com.microsoft" - entityTypeKey = "type" - entityNameKey = "name" - partitionNameKey = "partition" - securityTokenKey = "security_token" - eventHubEntityType = MsftVendor + ":eventhub" - partitionEntityType = MsftVendor + ":partition" - operationKey = "operation" - readOperationKey = "READ" - address = "$management" -) - -type ( - // client communicates with an AMQP management node - client struct { - namespace *namespace - hubName string - } - - // HubRuntimeInformation provides management node information about a given Event Hub instance - HubRuntimeInformation struct { - Path string `mapstructure:"name"` - CreatedAt time.Time `mapstructure:"created_at"` - PartitionCount int `mapstructure:"partition_count"` - PartitionIDs []string `mapstructure:"partition_ids"` - } - - // HubPartitionRuntimeInformation provides management node information about a given Event Hub partition - HubPartitionRuntimeInformation struct { - HubPath string `mapstructure:"name"` - PartitionID string `mapstructure:"partition"` - BeginningSequenceNumber int64 `mapstructure:"begin_sequence_number"` - LastSequenceNumber int64 `mapstructure:"last_enqueued_sequence_number"` - LastEnqueuedOffset string `mapstructure:"last_enqueued_offset"` - LastEnqueuedTimeUtc time.Time `mapstructure:"last_enqueued_time_utc"` - } -) - -// newClient constructs a new AMQP management client -func newClient(namespace *namespace, hubName string) *client { - return &client{ - namespace: namespace, - hubName: hubName, - } -} - -// GetHubRuntimeInformation requests runtime information for an Event Hub -func (c *client) GetHubRuntimeInformation(ctx context.Context, conn *amqp.Conn) (*HubRuntimeInformation, error) { - ctx, span := tab.StartSpan(ctx, "eh.mgmt.client.GetHubRuntimeInformation") - defer span.End() - - rpcLink, err 
:= rpc.NewLink(ctx, conn, address) - if err != nil { - return nil, err - } - - msg := &amqp.Message{ - ApplicationProperties: map[string]interface{}{ - operationKey: readOperationKey, - entityTypeKey: eventHubEntityType, - entityNameKey: c.hubName, - }, - } - msg, err = c.addSecurityToken(msg) - if err != nil { - return nil, err - } - - res, err := rpcLink.RetryableRPC(ctx, 3, 1*time.Second, msg) - if err != nil { - return nil, err - } - - hubRuntimeInfo, err := newHubRuntimeInformation(res.Message) - if err != nil { - return nil, err - } - return hubRuntimeInfo, nil -} - -// GetHubPartitionRuntimeInformation fetches runtime information from the AMQP management node for a given partition -func (c *client) GetHubPartitionRuntimeInformation(ctx context.Context, conn *amqp.Conn, partitionID string) (*HubPartitionRuntimeInformation, error) { - ctx, span := tab.StartSpan(ctx, "eh.mgmt.client.GetHubPartitionRuntimeInformation") - defer span.End() - - rpcLink, err := rpc.NewLink(ctx, conn, address) - if err != nil { - return nil, err - } - - msg := &amqp.Message{ - ApplicationProperties: map[string]interface{}{ - operationKey: readOperationKey, - entityTypeKey: partitionEntityType, - entityNameKey: c.hubName, - partitionNameKey: partitionID, - }, - } - msg, err = c.addSecurityToken(msg) - if err != nil { - return nil, err - } - - res, err := rpcLink.RetryableRPC(ctx, 3, 1*time.Second, msg) - if err != nil { - return nil, err - } - - hubPartitionRuntimeInfo, err := newHubPartitionRuntimeInformation(res.Message) - if err != nil { - return nil, err - } - return hubPartitionRuntimeInfo, nil -} - -func (c *client) addSecurityToken(msg *amqp.Message) (*amqp.Message, error) { - token, err := c.namespace.tokenProvider.GetToken(c.getTokenAudience()) - if err != nil { - return nil, err - } - msg.ApplicationProperties[securityTokenKey] = token.Token - - return msg, nil -} - -func (c *client) getTokenAudience() string { - return c.namespace.getAmqpHostURI() + c.hubName -} - -func newHubPartitionRuntimeInformation(msg *amqp.Message) (*HubPartitionRuntimeInformation, error) { - values, ok := msg.Value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("values were not map[string]interface{}, it was: %v", values) - } - - var partitionInfo HubPartitionRuntimeInformation - err := mapstructure.Decode(values, &partitionInfo) - return &partitionInfo, err -} - -// newHubRuntimeInformation constructs a new HubRuntimeInformation from an AMQP message -func newHubRuntimeInformation(msg *amqp.Message) (*HubRuntimeInformation, error) { - values, ok := msg.Value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("values were not map[string]interface{}, it was: %v", values) - } - - var runtimeInfo HubRuntimeInformation - err := mapstructure.Decode(values, &runtimeInfo) - return &runtimeInfo, err -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/atom/atom.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/atom/atom.go deleted file mode 100644 index 6e7a93f3162..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/atom/atom.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package atom contains base data structures for use in the Azure Event Hubs management HTTP API -package atom - -import ( - "encoding/xml" - - "github.com/Azure/go-autorest/autorest/date" -) - -type ( - // Feed is an Atom feed which contains entries - Feed struct { - XMLName xml.Name `xml:"feed"` - ID string `xml:"id"` - Title string `xml:"title"` - Updated *date.Time `xml:"updated,omitempty"` - Entries []Entry `xml:"entry"` 
- } - - // Entry is the Atom wrapper for a management request - Entry struct { - XMLName xml.Name `xml:"entry"` - ID string `xml:"id,omitempty"` - Title string `xml:"title,omitempty"` - Published *date.Time `xml:"published,omitempty"` - Updated *date.Time `xml:"updated,omitempty"` - Author *Author `xml:"author,omitempty"` - Link *Link `xml:"link,omitempty"` - Content *Content `xml:"content"` - DataServiceSchema string `xml:"xmlns:d,attr,omitempty"` - DataServiceMetadataSchema string `xml:"xmlns:m,attr,omitempty"` - AtomSchema string `xml:"xmlns,attr"` - } - - // Author is an Atom author used in an entry - Author struct { - XMLName xml.Name `xml:"author"` - Name *string `xml:"name,omitempty"` - } - - // Link is an Atom link used in an entry - Link struct { - XMLName xml.Name `xml:"link"` - Rel string `xml:"rel,attr"` - HREF string `xml:"href,attr"` - } - - // Content is a generic body for an Atom entry - Content struct { - XMLName xml.Name `xml:"content"` - Type string `xml:"type,attr"` - Body string `xml:",innerxml"` - } -) diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/azuredeploy.tf b/vendor/github.com/Azure/azure-event-hubs-go/v3/azuredeploy.tf deleted file mode 100644 index 55dd6eb87d4..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/azuredeploy.tf +++ /dev/null @@ -1,164 +0,0 @@ -provider "azuread" { - version = "~> 0.6" -} - -provider "azurerm" { - version = "~> 1.34" -} - -provider "random" { - version = "~> 2.2" -} - -variable "location" { - # eastus support AAD authentication, which at the time of writing this is in preview. - # see: https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-role-based-access-control - description = "Azure datacenter to deploy to." - default = "eastus" -} - -variable "eventhub_name_prefix" { - description = "Input your unique Azure Service Bus Namespace name" - default = "azureehtests" -} - -variable "resource_group_name_prefix" { - description = "Resource group to provision test infrastructure in." - default = "eventhub-go-tests" -} - -variable "azure_client_secret" { - description = "(Optional) piped in from env var so .env will be updated if there is an existing client secret" - default = "foo" -} - -# Data resources used to get SubID and Tennant Info -data "azurerm_client_config" "current" { -} - -resource "random_string" "name" { - length = 8 - upper = false - special = false - number = false -} - -# Create resource group for all of the things -resource "azurerm_resource_group" "test" { - name = "${var.resource_group_name_prefix}-${random_string.name.result}" - location = var.location -} - -# Create an Event Hub namespace for testing -resource "azurerm_eventhub_namespace" "test" { - name = "${var.eventhub_name_prefix}-${random_string.name.result}" - location = azurerm_resource_group.test.location - resource_group_name = azurerm_resource_group.test.name - sku = "standard" -} - -resource "azurerm_storage_account" "test" { - name = "${var.eventhub_name_prefix}${random_string.name.result}" - resource_group_name = azurerm_resource_group.test.name - location = azurerm_resource_group.test.location - account_replication_type = "LRS" - account_tier = "Standard" -} - -# Generate a random secret fo the service principal -resource "random_string" "secret" { - count = data.azurerm_client_config.current.service_principal_application_id == "" ? 
1 : 0 - length = 32 - upper = true - special = true - number = true -} - -// Application for AAD authentication -resource "azuread_application" "test" { - count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0 - name = "eventhubstest" - homepage = "https://eventhubstest-${random_string.name.result}" - identifier_uris = ["https://eventhubstest-${random_string.name.result}"] - reply_urls = ["https://eventhubstest-${random_string.name.result}"] - available_to_other_tenants = false - oauth2_allow_implicit_flow = true -} - -# Create a service principal, which represents a linkage between the AAD application and the password -resource "azuread_service_principal" "test" { - count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0 - application_id = azuread_application.test[0].application_id -} - -# Create a new service principal password which will be the AZURE_CLIENT_SECRET env var -resource "azuread_service_principal_password" "test" { - count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0 - service_principal_id = azuread_service_principal.test[0].id - value = random_string.secret[0].result - end_date = "2030-01-01T01:02:03Z" -} - -# This provides the new AAD application the rights to managed the resource group -resource "azurerm_role_assignment" "service_principal_rg" { - scope = "subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${azurerm_resource_group.test.name}" - role_definition_name = "Owner" - principal_id = data.azurerm_client_config.current.service_principal_application_id == "" ? azuread_service_principal.test[0].id : data.azurerm_client_config.current.service_principal_object_id -} - -# This provides the new AAD application the rights to managed, send and receive from the Event Hubs instance -resource "azurerm_role_assignment" "service_principal_eh" { - scope = "subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.EventHub/namespaces/${azurerm_eventhub_namespace.test.name}" - role_definition_name = "Azure Event Hubs Data Owner" - principal_id = data.azurerm_client_config.current.service_principal_application_id == "" ? 
azuread_service_principal.test[0].id : data.azurerm_client_config.current.service_principal_object_id - depends_on = [azurerm_eventhub_namespace.test] -} - - -output "TEST_EVENTHUB_RESOURCE_GROUP" { - value = azurerm_resource_group.test.name -} - -output "EVENTHUB_CONNECTION_STRING" { - value = "Endpoint=sb://${azurerm_eventhub_namespace.test.name}.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=${azurerm_eventhub_namespace.test.default_primary_key}" - sensitive = true -} - -output "EVENTHUB_NAMESPACE" { - value = azurerm_eventhub_namespace.test.name -} - -output "AZURE_SUBSCRIPTION_ID" { - value = data.azurerm_client_config.current.subscription_id -} - -output "TEST_EVENTHUB_LOCATION" { - value = var.location -} - -output "AZURE_TENANT_ID" { - value = data.azurerm_client_config.current.tenant_id -} - -output "AZURE_CLIENT_ID" { - value = compact( - concat( - azuread_application.test.*.application_id, - [data.azurerm_client_config.current.client_id] - ) - )[0] -} - -output "AZURE_CLIENT_SECRET" { - value = compact( - concat( - azuread_service_principal_password.test.*.value, - [var.azure_client_secret] - ) - )[0] - sensitive = true -} - -output "STORAGE_ACCOUNT_NAME" { - value = azurerm_storage_account.test.name -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/batch.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/batch.go deleted file mode 100644 index 0f31cb0a551..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/batch.go +++ /dev/null @@ -1,221 +0,0 @@ -package eventhub - -import ( - "errors" - - "github.com/Azure/azure-amqp-common-go/v4/uuid" - "github.com/Azure/go-amqp" -) - -type ( - // BatchOptions are optional information to add to a batch of messages - BatchOptions struct { - MaxSize MaxMessageSizeInBytes - } - - // BatchIterator offers a simple mechanism for batching a list of events - BatchIterator interface { - Done() bool - Next(messageID string, opts *BatchOptions) (*EventBatch, error) - } - - // EventBatchIterator provides an easy way to iterate over a slice of events to reliably create batches - EventBatchIterator struct { - Cursors map[string]int - PartitionEventsMap map[string][]*Event - } - - // EventBatch is a batch of Event Hubs messages to be sent - EventBatch struct { - *Event - marshaledMessages [][]byte - MaxSize MaxMessageSizeInBytes - size int - } - - // BatchOption provides a way to configure `BatchOptions` - BatchOption func(opt *BatchOptions) error - - // MaxMessageSizeInBytes is the max number of bytes allowed by Azure Service Bus - MaxMessageSizeInBytes uint -) - -const ( - // DefaultMaxMessageSizeInBytes is the maximum number of bytes in an event (https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-quotas) - DefaultMaxMessageSizeInBytes MaxMessageSizeInBytes = 1000000 - - batchMessageWrapperSize = 100 - // KeyOfNoPartitionKey is the key value in Events map for Events which do not have PartitionKey - KeyOfNoPartitionKey = "NoPartitionKey" -) - -// ErrMessageIsTooBig represents the error when one single event in the batch is bigger than the maximum batch size -var ErrMessageIsTooBig = errors.New("message is too big") - -// BatchWithMaxSizeInBytes configures the EventBatchIterator to fill the batch to the specified max size in bytes -func BatchWithMaxSizeInBytes(sizeInBytes int) BatchOption { - return func(batchOption *BatchOptions) error { - batchOption.MaxSize = MaxMessageSizeInBytes(sizeInBytes) - return nil - } -} - -// NewEventBatchIterator wraps a slice of `Event` pointers to 
allow it to be made into a `EventBatchIterator`. -func NewEventBatchIterator(events ...*Event) *EventBatchIterator { - partitionEventMap := make(map[string][]*Event) - cursors := make(map[string]int) - for _, event := range events { - var ok bool - var key string - if event.PartitionKey == nil { - key = KeyOfNoPartitionKey - } else { - key = *event.PartitionKey - } - if _, ok = partitionEventMap[key]; !ok { - cursors[key] = 0 - } - partitionEventMap[key] = append(partitionEventMap[key], event) - } - return &EventBatchIterator{ - Cursors: cursors, - PartitionEventsMap: partitionEventMap, - } -} - -// Done communicates whether there are more messages remaining to be iterated over. -func (ebi *EventBatchIterator) Done() bool { - for key, cursor := range ebi.Cursors { - if cursor != len(ebi.PartitionEventsMap[key]) { - return false - } - } - return true -} - -// Next fetches the batch of messages in the message slice at a position one larger than the last one accessed. -func (ebi *EventBatchIterator) Next(eventID string, opts *BatchOptions) (*EventBatch, error) { - var key string - for partitionKey, cursor := range ebi.Cursors { - if cursor != len(ebi.PartitionEventsMap[partitionKey]) { - key = partitionKey - } - } - if key == "" { - return nil, ErrNoMessages{} - } - - if opts == nil { - opts = &BatchOptions{ - MaxSize: DefaultMaxMessageSizeInBytes, - } - } - - events := ebi.PartitionEventsMap[key][ebi.Cursors[key]:] - eb := NewEventBatch(eventID, opts) - if key != KeyOfNoPartitionKey && len(events) > 0 { - eb.PartitionKey = events[0].PartitionKey - } - for _, event := range events { - ok, err := eb.Add(event) - if err != nil { - return nil, err - } - - if !ok { - if len(eb.marshaledMessages) == 0 { - ebi.Cursors[key]++ - return nil, ErrMessageIsTooBig - } - - return eb, nil - } - ebi.Cursors[key]++ - } - return eb, nil -} - -// NewEventBatch builds a new event batch -func NewEventBatch(eventID string, opts *BatchOptions) *EventBatch { - if opts == nil { - opts = &BatchOptions{ - MaxSize: DefaultMaxMessageSizeInBytes, - } - } - - mb := &EventBatch{ - MaxSize: opts.MaxSize, - Event: &Event{ - ID: eventID, - }, - } - - return mb -} - -// Add adds a message to the batch if the message will not exceed the max size of the batch -func (eb *EventBatch) Add(e *Event) (bool, error) { - e.PartitionKey = eb.PartitionKey - - msg, err := e.toMsg() - if err != nil { - return false, err - } - - if msg.Properties.MessageID == nil || msg.Properties.MessageID == "" { - uid, err := uuid.NewV4() - if err != nil { - return false, err - } - msg.Properties.MessageID = uid.String() - } - - bin, err := msg.MarshalBinary() - if err != nil { - return false, err - } - - if eb.Size()+len(bin) > int(eb.MaxSize) { - return false, nil - } - - eb.size += len(bin) - eb.marshaledMessages = append(eb.marshaledMessages, bin) - return true, nil -} - -// Clear will zero out the batch size and clear the buffered messages -func (eb *EventBatch) Clear() { - eb.marshaledMessages = [][]byte{} - eb.size = 0 -} - -// Size is the number of bytes in the message batch -func (eb *EventBatch) Size() int { - // calculated data size + batch message wrapper + data wrapper portions of the message - return eb.size + batchMessageWrapperSize + (len(eb.marshaledMessages) * 5) -} - -func (eb *EventBatch) toMsg() (*amqp.Message, error) { - batchMessage := eb.amqpBatchMessage() - - batchMessage.Data = make([][]byte, len(eb.marshaledMessages)) - _ = copy(batchMessage.Data, eb.marshaledMessages) - - if eb.PartitionKey != nil { - batchMessage.Annotations = 
make(amqp.Annotations) - batchMessage.Annotations[partitionKeyAnnotationName] = eb.PartitionKey - } - - return batchMessage, nil -} - -func (eb *EventBatch) amqpBatchMessage() *amqp.Message { - return &amqp.Message{ - Data: make([][]byte, len(eb.marshaledMessages)), - Format: batchMessageFormat, - Properties: &amqp.MessageProperties{ - MessageID: eb.ID, - }, - } -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/changelog.md b/vendor/github.com/Azure/azure-event-hubs-go/v3/changelog.md deleted file mode 100644 index 7c04d76295d..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/changelog.md +++ /dev/null @@ -1,245 +0,0 @@ -# Change Log - -## `v3.6.2` - -- Update dependencies. - -## `v3.6.1` - -- Deprecate module. Use github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs instead. - -## `v3.6.0` - -- Updated to latest `azure-amqp-common-go` and GA version of `go-amqp` modules. - -## `v3.5.0` - -- Updated to latest `azure-amqp-common-go` and `go-amqp` modules. - -## `v3.4.1` - -- Updated `golang.org/x/net` to the latest version. [#286](https://github.com/Azure/azure-event-hubs-go/pull/286) - -## `v3.4.0` - -- Updated to latest `azure-amqp-common-go` and `go-amqp` modules. - -## `v3.3.20` - -- Fixed issue with a lease existing on start [#277](https://github.com/Azure/azure-event-hubs-go/pull/277) - -## `v3.3.19` - -- Vendor a copy of `azblob` to avoid compilation errors with mismatched versions [#261](https://github.com/Azure/azure-event-hubs-go/issues/261) - -## `v3.3.18` - -- Fixing issue where the LeaserCheckpointer could fail with a "ContainerAlreadyExists" error. (#253) - -## `v3.3.17` - -Updating to the latest go-amqp and azure-amqp-common-go to take advantage of some underlying reliability and interface improvements (#245) - -## `v3.3.16` - -- Exporting a subset of AMQP message properties for the Dapr project. - -## `v3.3.13` - -- We no longer close the link when we receive disposition errors on sending. This allows - us to return errors properly when doing parallel sends on a link that is being - throttled. [PR#234](https://github.com/Azure/azure-event-hubs-go/pull/234) - -## `v3.3.12` - -- Fix bug in sender.Recover() where recovery could get stuck when a link was throttled. [PR#232](#https://github.com/Azure/azure-event-hubs-go/pull/232) - -## `v3.3.11` - -- Allow for controlling the maximum retry count when sending messages. 
[#226](https://github.com/Azure/azure-event-hubs-go/issues/226) - -## `v3.3.10` - -- Fix sender.Recover() to be goroutine safe [#218](https://github.com/Azure/azure-event-hubs-go/issues/218) -- Skip calling sender.Recover() for some errors [#219](https://github.com/Azure/azure-event-hubs-go/issues/219) - -## `v3.3.9` - -- update the checkpoint after the receiver options are applied -- return the error from reading an old checkpoint when initializing the receiver - -## `v3.3.8` - -- add option to customise initial checkpoint - -## `v3.3.7` - -- add option to prefix checkpoint blob paths - -## `v3.3.6` - -- fix goroutine leak on listener close - -## `v3.3.5` - -- Remove the check for temporary network errors in sender.go [#80](https://github.com/Azure/azure-event-hubs-go/issues/80) - -## `v3.3.4` - -- read AZURE_ENVIRONMENT variable from environment to use the specified value when creating NewHub - -## `v3.3.3` - -- EventBatchIterator drops messages which bigger than 1MB with an error - -## `v3.3.2` - -- passing a context to internal calls that use go-amqp that now expect a context -- updating dependencies in go.mod - -## `v3.3.1` - -- fixed panic caused by interface conversion in event.go [#182](https://github.com/Azure/azure-event-hubs-go/issues/182) -- apply Receive options after populating last stored checkpoint - -## `v3.3.0` - -- add support for sending and receiving custom annotations - -## `v3.2.0` - -- add IoT Hub system properties - -## `v3.1.2` - -- fix errors in message handling being ignored [#155](https://github.com/Azure/azure-event-hubs-go/issues/155) - -## `v3.1.1` - -- Azure storage SAS token regeneration fix [#157](https://github.com/Azure/azure-event-hubs-go/issues/157) - -## `v3.1.0` - -- add support for websocket connections with eph with `eph.WithWebSocketConnection()` - -## `v2.0.4` - -- add comment on the `PartitionID` field in `SystemProperties` to clarify that it will always return a nil value [#131](https://github.com/Azure/azure-event-hubs-go/issues/131) - -## `v2.0.3` - -- fix send on closed channel for GetLeases [#142](https://github.com/Azure/azure-event-hubs-go/issues/142) - -## `v2.0.2` - -- enable partitionKey for sendBatch to fix [#128](https://github.com/Azure/azure-event-hubs-go/issues/128) -- ensure sender receives ack'd messages from EH [#126](https://github.com/Azure/azure-event-hubs-go/issues/126) -- close `leaseCh` on function return in storage.(\*LeaserCheckpointer).GetLeases to fix [#136](https://github.com/Azure/azure-event-hubs-go/issues/136) - -## `v2.0.1` - -- update to amqp 0.11.2 & common 2.1.0 to fix [#115](https://github.com/Azure/azure-event-hubs-go/issues/115) -- added checkpoint attribute to receiver to fix [#95](https://github.com/Azure/azure-event-hubs-go/issues/95) and [#118](https://github.com/Azure/azure-event-hubs-go/issues/118) - -## `v2.0.0` - -- **breaking change:** moved github.com/Azure/azure-amqp-common-go/persist to - github.com/Azure/azure-event-hubs-go/persist -- **breaking change:** changed batch message sending to use a safe batch iterator rather than leaving batch sizing to - the consumer. 
-- move tracing to devigned/tab so to not have to take a direct dependency on opentracing or opencensus - -## `v1.3.1` - -- cleanup connection after making management request - -## `v1.3.0` - -- add `SystemProperties` to `Event` which contains immutable broker provided metadata (squence number, offset, - enqueued time) - -## `v1.2.0` - -- add websocket support - -## `v1.1.5` - -- add sender recovery handling for `amqp.ErrLinkClose`, `amqp.ErrConnClosed` and `amqp.ErrSessionClosed` - -## `v1.1.4` - -- update to amqp 0.11.0 and change sender to use unsettled rather than receiver second mode - -## `v1.1.3` - -- fix leak in partition persistence -- fix discarding event properties on batch sending - -## `v1.1.2` - -- take dep on updated amqp common which has more permissive RPC status description parsing - -## `v1.1.1` - -- close sender when hub is closed -- ensure links, session and connections are closed gracefully - -## `v1.1.0` - -- add receive option to receive from a timestamp -- fix sender recovery on temporary network failures -- add LeasePersistenceInterval to Azure Storage LeaserCheckpointer to allow for customization of persistence interval - duration - -## `v1.0.1` - -- fix the breaking change from storage; this is not a breaking change for this library -- move from dep to go modules - -## `v1.0.0` - -- change from OpenTracing to OpenCensus -- add more documentation for EPH -- variadic mgmt options - -## `v0.4.0` - -- add partition key to received event [#43](https://github.com/Azure/azure-event-hubs-go/pull/43) -- remove `Receive` in eph in favor of `RegisterHandler`, `UnregisterHandler` and `RegisteredHandlerIDs` [#45](https://github.com/Azure/azure-event-hubs-go/pull/45) - -## `v0.3.1` - -- simplify environmental construction by preferring SAS - -## `v0.3.0` - -- pin version of amqp - -## `v0.2.1` - -- update dependency on common to 0.3.2 to fix retry returning nil error - -## `v0.2.0` - -- add opentracing support -- add context to close functions (breaking change) - -## `v0.1.2` - -- remove an extraneous dependency on satori/uuid - -## `v0.1.1` - -- update common dependency to 0.2.4 -- provide more feedback when sending using testhub -- retry send upon server-busy -- use a new connection for each sender and receiver - -## `v0.1.0` - -- initial release -- basic send and receive -- batched send -- offset persistence -- alpha event host processor with Azure storage persistence -- enabled prefetch batching diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/errors.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/errors.go deleted file mode 100644 index 318dd9e4a7a..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package eventhub - -type ( - // ErrNoMessages is returned when an operation returned no messages. It is not indicative that there will not be - // more messages in the future. - ErrNoMessages struct{} -) - -func (e ErrNoMessages) Error() string { - return "no messages available" -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/event.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/event.go deleted file mode 100644 index 6722792c53c..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/event.go +++ /dev/null @@ -1,365 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "fmt" - "reflect" - "strings" - "time" - - "github.com/Azure/go-amqp" - "github.com/mitchellh/mapstructure" - - "github.com/Azure/azure-event-hubs-go/v3/persist" -) - -const ( - batchMessageFormat uint32 = 0x80013700 - partitionKeyAnnotationName string = "x-opt-partition-key" - sequenceNumberName string = "x-opt-sequence-number" - enqueueTimeName string = "x-opt-enqueued-time" -) - -type ( - // Event is an Event Hubs message to be sent or received - Event struct { - Data []byte - PartitionKey *string - Properties map[string]interface{} - - ID string - - message *amqp.Message - SystemProperties *SystemProperties - - // RawAMQPMessage is a subset of fields from the underlying AMQP message. - // NOTE: These fields are only used when receiving events and are not sent. - RawAMQPMessage struct { - // Properties are standard properties for an AMQP message. - Properties struct { - // The identity of the user responsible for producing the message. - // The client sets this value, and it MAY be authenticated by intermediaries. - UserID []byte - - // This is a client-specific id that can be used to mark or identify messages - // between clients. - CorrelationID interface{} // uint64, UUID, []byte, or string - - // The content-encoding property is used as a modifier to the content-type. - // When present, its value indicates what additional content encodings have been - // applied to the application-data, and thus what decoding mechanisms need to be - // applied in order to obtain the media-type referenced by the content-type header - // field. - ContentEncoding string - - // The RFC-2046 [RFC2046] MIME type for the message's application-data section - // (body). As per RFC-2046 [RFC2046] this can contain a charset parameter defining - // the character encoding used: e.g., 'text/plain; charset="utf-8"'. - // - // For clarity, as per section 7.2.1 of RFC-2616 [RFC2616], where the content type - // is unknown the content-type SHOULD NOT be set. This allows the recipient the - // opportunity to determine the actual type. Where the section is known to be truly - // opaque binary data, the content-type SHOULD be set to application/octet-stream. - ContentType string - - // A common field for summary information about the message content and purpose. - Subject string - } - } - } - - // SystemProperties are used to store properties that are set by the system. 
- SystemProperties struct { - SequenceNumber *int64 `mapstructure:"x-opt-sequence-number"` // unique sequence number of the message - EnqueuedTime *time.Time `mapstructure:"x-opt-enqueued-time"` // time the message landed in the message queue - Offset *int64 `mapstructure:"x-opt-offset"` - PartitionID *int16 `mapstructure:"x-opt-partition-id"` // This value will always be nil. For information related to the event's partition refer to the PartitionKey field in this type - PartitionKey *string `mapstructure:"x-opt-partition-key"` - // Nil for messages other than from Azure IoT Hub. deviceId of the device that sent the message. - IoTHubDeviceConnectionID *string `mapstructure:"iothub-connection-device-id"` - // Nil for messages other than from Azure IoT Hub. Used to distinguish devices with the same deviceId, when they have been deleted and re-created. - IoTHubAuthGenerationID *string `mapstructure:"iothub-connection-auth-generation-id"` - // Nil for messages other than from Azure IoT Hub. Contains information about the authentication method used to authenticate the device sending the message. - IoTHubConnectionAuthMethod *string `mapstructure:"iothub-connection-auth-method"` - // Nil for messages other than from Azure IoT Hub. moduleId of the device that sent the message. - IoTHubConnectionModuleID *string `mapstructure:"iothub-connection-module-id"` - // Nil for messages other than from Azure IoT Hub. The time the Device-to-Cloud message was received by IoT Hub. - IoTHubEnqueuedTime *time.Time `mapstructure:"iothub-enqueuedtime"` - // Raw annotations provided on the message. Includes any additional System Properties that are not explicitly mapped. - Annotations map[string]interface{} `mapstructure:"-"` - } - - mapStructureTag struct { - Name string - PersistEmpty bool - } -) - -// NewEventFromString builds an Event from a string message -func NewEventFromString(message string) *Event { - return NewEvent([]byte(message)) -} - -// NewEvent builds an Event from a slice of data -func NewEvent(data []byte) *Event { - return &Event{ - Data: data, - } -} - -// GetCheckpoint returns the checkpoint information on the Event -func (e *Event) GetCheckpoint() persist.Checkpoint { - var offset string - var enqueueTime time.Time - var sequenceNumber int64 - if val, ok := e.message.Annotations[offsetAnnotationName]; ok { - offset = fmt.Sprintf("%v", val) - } - - if val, ok := e.message.Annotations[enqueueTimeName]; ok { - enqueueTime = val.(time.Time) - } - - if val, ok := e.message.Annotations[sequenceNumberName]; ok { - sequenceNumber = val.(int64) - } - - return persist.NewCheckpoint(offset, sequenceNumber, enqueueTime) -} - -// GetKeyValues implements tab.Carrier -func (e *Event) GetKeyValues() map[string]interface{} { - return e.Properties -} - -// Set implements tab.Carrier -func (e *Event) Set(key string, value interface{}) { - if e.Properties == nil { - e.Properties = make(map[string]interface{}) - } - e.Properties[key] = value -} - -// Get will fetch a property from the event -func (e *Event) Get(key string) (interface{}, bool) { - if e.Properties == nil { - return nil, false - } - - if val, ok := e.Properties[key]; ok { - return val, true - } - return nil, false -} - -func (e *Event) toMsg() (*amqp.Message, error) { - msg := e.message - if msg == nil { - msg = amqp.NewMessage(e.Data) - } - - msg.Properties = &amqp.MessageProperties{ - MessageID: e.ID, - } - - if len(e.Properties) > 0 { - msg.ApplicationProperties = make(map[string]interface{}) - for key, value := range e.Properties { - 
msg.ApplicationProperties[key] = value - } - } - - if e.SystemProperties != nil { - // Set the raw annotations first (they may be nil) and add the explicit - // system properties second to ensure they're set properly. - msg.Annotations = addMapToAnnotations(msg.Annotations, e.SystemProperties.Annotations) - - sysPropMap, err := encodeStructureToMap(e.SystemProperties) - if err != nil { - return nil, err - } - msg.Annotations = addMapToAnnotations(msg.Annotations, sysPropMap) - } - - if e.PartitionKey != nil { - if msg.Annotations == nil { - msg.Annotations = make(amqp.Annotations) - } - - msg.Annotations[partitionKeyAnnotationName] = e.PartitionKey - } - - return msg, nil -} - -func eventFromMsg(msg *amqp.Message) (*Event, error) { - return newEvent(msg.Data[0], msg) -} - -func newEvent(data []byte, msg *amqp.Message) (*Event, error) { - event := &Event{ - Data: data, - message: msg, - } - - if msg.Properties != nil { - if id, ok := msg.Properties.MessageID.(string); ok { - event.ID = id - } - - event.RawAMQPMessage.Properties.UserID = msg.Properties.UserID - - if msg.Properties.Subject != nil { - event.RawAMQPMessage.Properties.Subject = *msg.Properties.Subject - } - - event.RawAMQPMessage.Properties.CorrelationID = msg.Properties.CorrelationID - - if msg.Properties.ContentEncoding != nil { - event.RawAMQPMessage.Properties.ContentEncoding = *msg.Properties.ContentEncoding - } - - if msg.Properties.ContentType != nil { - event.RawAMQPMessage.Properties.ContentType = *msg.Properties.ContentType - } - } - - if msg.Annotations != nil { - if val, ok := msg.Annotations[partitionKeyAnnotationName]; ok { - if valStr, ok := val.(string); ok { - event.PartitionKey = &valStr - } - } - - if err := mapstructure.WeakDecode(msg.Annotations, &event.SystemProperties); err != nil { - fmt.Println("error decoding...", err) - return event, err - } - - // If we didn't populate any system properties, set up the struct so we - // can put the annotations in it - if event.SystemProperties == nil { - event.SystemProperties = new(SystemProperties) - } - - // Take all string-keyed annotations because the protocol reserves all - // numeric keys for itself and there are no numeric keys defined in the - // protocol today: - // - // http://www.amqp.org/sites/amqp.org/files/amqp.pdf (section 3.2.10) - // - // This approach is also consistent with the behavior of .NET: - // - // https://docs.microsoft.com/en-us/dotnet/api/azure.messaging.eventhubs.eventdata.systemproperties?view=azure-dotnet#Azure_Messaging_EventHubs_EventData_SystemProperties - event.SystemProperties.Annotations = make(map[string]interface{}) - for key, val := range msg.Annotations { - if s, ok := key.(string); ok { - event.SystemProperties.Annotations[s] = val - } - } - } - - if msg != nil { - event.Properties = msg.ApplicationProperties - } - - return event, nil -} - -func encodeStructureToMap(structPointer interface{}) (map[string]interface{}, error) { - valueOfStruct := reflect.ValueOf(structPointer) - s := valueOfStruct.Elem() - if s.Kind() != reflect.Struct { - return nil, fmt.Errorf("must provide a struct") - } - - encoded := make(map[string]interface{}) - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - if f.IsValid() && f.CanSet() { - tf := s.Type().Field(i) - tag, err := parseMapStructureTag(tf.Tag) - if err != nil { - return nil, err - } - - // Skip any entries with an exclude tag - if tag.Name == "-" { - continue - } - - if tag != nil { - switch f.Kind() { - case reflect.Ptr: - if !f.IsNil() || tag.PersistEmpty { - if f.IsNil() { - 
encoded[tag.Name] = nil - } else { - encoded[tag.Name] = f.Elem().Interface() - } - } - default: - if f.Interface() != reflect.Zero(f.Type()).Interface() || tag.PersistEmpty { - encoded[tag.Name] = f.Interface() - } - } - } - } - } - - return encoded, nil -} - -func parseMapStructureTag(tag reflect.StructTag) (*mapStructureTag, error) { - str, ok := tag.Lookup("mapstructure") - if !ok { - return nil, nil - } - - mapTag := new(mapStructureTag) - split := strings.Split(str, ",") - mapTag.Name = strings.TrimSpace(split[0]) - - if len(split) > 1 { - for _, tagKey := range split[1:] { - switch tagKey { - case "persistempty": - mapTag.PersistEmpty = true - default: - return nil, fmt.Errorf("key %q is not understood", tagKey) - } - } - } - return mapTag, nil -} - -func addMapToAnnotations(a amqp.Annotations, m map[string]interface{}) amqp.Annotations { - if a == nil && len(m) > 0 { - a = make(amqp.Annotations) - } - for key, val := range m { - a[key] = val - } - return a -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/http_mgmt.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/http_mgmt.go deleted file mode 100644 index 6263e7780ec..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/http_mgmt.go +++ /dev/null @@ -1,183 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "bytes" - "context" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/Azure/azure-amqp-common-go/v4/auth" - "github.com/devigned/tab" -) - -const ( - serviceBusSchema = "http://schemas.microsoft.com/netservices/2010/10/servicebus/connect" - atomSchema = "http://www.w3.org/2005/Atom" - applicationXML = "application/xml" -) - -type ( - // entityManager provides CRUD functionality for Service Bus entities (Queues, Topics, Subscriptions...) 
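The reflection helpers deleted with event.go (encodeStructureToMap and parseMapStructureTag) are what turn SystemProperties into AMQP annotations: fields carrying a mapstructure tag are copied into a map keyed by the tag name, with nil pointers skipped unless tagged persistempty. A simplified, self-contained illustration of that behavior, using local stand-in types rather than the removed ones and handling only pointer fields:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// props is a stand-in for the removed SystemProperties type.
type props struct {
	SequenceNumber *int64 `mapstructure:"x-opt-sequence-number"`
	Offset         *int64 `mapstructure:"x-opt-offset"`
}

// encode emits non-nil, mapstructure-tagged pointer fields into a map,
// mirroring the core of the removed encodeStructureToMap.
func encode(p interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	v := reflect.ValueOf(p).Elem()
	for i := 0; i < v.NumField(); i++ {
		tag := strings.Split(v.Type().Field(i).Tag.Get("mapstructure"), ",")[0]
		if f := v.Field(i); tag != "" && tag != "-" && f.Kind() == reflect.Ptr && !f.IsNil() {
			out[tag] = f.Elem().Interface()
		}
	}
	return out
}

func main() {
	n := int64(42)
	fmt.Println(encode(&props{SequenceNumber: &n})) // map[x-opt-sequence-number:42]
}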
- entityManager struct { - TokenProvider auth.TokenProvider - Host string - } - - // BaseEntityDescription provides common fields which are part of Queues, Topics and Subscriptions - BaseEntityDescription struct { - InstanceMetadataSchema *string `xml:"xmlns:i,attr,omitempty"` - ServiceBusSchema *string `xml:"xmlns,attr,omitempty"` - } - - managementError struct { - XMLName xml.Name `xml:"Error"` - Code int `xml:"Code"` - Detail string `xml:"Detail"` - } -) - -func (m *managementError) String() string { - return fmt.Sprintf("Code: %d, Details: %s", m.Code, m.Detail) -} - -// newEntityManager creates a new instance of an entityManager given a token provider and host -func newEntityManager(host string, tokenProvider auth.TokenProvider) *entityManager { - return &entityManager{ - Host: host, - TokenProvider: tokenProvider, - } -} - -// Get performs an HTTP Get for a given entity path -func (em *entityManager) Get(ctx context.Context, entityPath string) (*http.Response, error) { - span, ctx := em.startSpanFromContext(ctx, "sb.EntityManger.Get") - defer span.End() - - return em.Execute(ctx, http.MethodGet, entityPath, http.NoBody) -} - -// Put performs an HTTP PUT for a given entity path and body -func (em *entityManager) Put(ctx context.Context, entityPath string, body []byte) (*http.Response, error) { - span, ctx := em.startSpanFromContext(ctx, "sb.EntityManger.Put") - defer span.End() - - return em.Execute(ctx, http.MethodPut, entityPath, bytes.NewReader(body)) -} - -// Delete performs an HTTP DELETE for a given entity path -func (em *entityManager) Delete(ctx context.Context, entityPath string) (*http.Response, error) { - span, ctx := em.startSpanFromContext(ctx, "sb.EntityManger.Delete") - defer span.End() - - return em.Execute(ctx, http.MethodDelete, entityPath, http.NoBody) -} - -// Post performs an HTTP POST for a given entity path and body -func (em *entityManager) Post(ctx context.Context, entityPath string, body []byte) (*http.Response, error) { - span, ctx := em.startSpanFromContext(ctx, "sb.EntityManger.Post") - defer span.End() - - return em.Execute(ctx, http.MethodPost, entityPath, bytes.NewReader(body)) -} - -// Execute performs an HTTP request given a http method, path and body -func (em *entityManager) Execute(ctx context.Context, method string, entityPath string, body io.Reader) (*http.Response, error) { - span, ctx := em.startSpanFromContext(ctx, "sb.EntityManger.Execute") - defer span.End() - - client := &http.Client{ - Timeout: 60 * time.Second, - } - req, err := http.NewRequest(method, em.Host+strings.TrimPrefix(entityPath, "/"), body) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - req = addAtomXMLContentType(req) - req = addAPIVersion201704(req) - applyRequestInfo(span, req) - req, err = em.addAuthorization(req) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - req = req.WithContext(ctx) - res, err := client.Do(req) - - if err != nil { - tab.For(ctx).Error(err) - } - - if res != nil { - applyResponseInfo(span, res) - } - - return res, err -} - -func (em *entityManager) addAuthorization(req *http.Request) (*http.Request, error) { - signature, err := em.TokenProvider.GetToken(req.URL.String()) - if err != nil { - return nil, err - } - - req.Header.Add("Authorization", signature.Token) - return req, nil -} - -func addAtomXMLContentType(req *http.Request) *http.Request { - if req.Method != http.MethodGet && req.Method != http.MethodHead { - req.Header.Add("content-Type", "application/atom+xml;type=entry;charset=utf-8") - } - return 
req -} - -func addAPIVersion201704(req *http.Request) *http.Request { - q := req.URL.Query() - q.Add("api-version", "2017-04") - req.URL.RawQuery = q.Encode() - return req -} - -func xmlDoc(content []byte) []byte { - return []byte(xml.Header + string(content)) -} - -func formatManagementError(body []byte) error { - var mgmtError managementError - unmarshalErr := xml.Unmarshal(body, &mgmtError) - if unmarshalErr != nil { - return errors.New(string(body)) - } - - return fmt.Errorf("error code: %d, Details: %s", mgmtError.Code, mgmtError.Detail) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/hub.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/hub.go deleted file mode 100644 index 8ac3b76bdb1..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/hub.go +++ /dev/null @@ -1,792 +0,0 @@ -// Package eventhub provides functionality for interacting with Azure Event Hubs. -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "context" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "os" - "path" - "sync" - - "github.com/Azure/azure-amqp-common-go/v4/aad" - "github.com/Azure/azure-amqp-common-go/v4/auth" - "github.com/Azure/azure-amqp-common-go/v4/conn" - "github.com/Azure/azure-amqp-common-go/v4/sas" - "github.com/Azure/azure-amqp-common-go/v4/uuid" - "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub" - "github.com/Azure/go-amqp" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/devigned/tab" - - "github.com/Azure/azure-event-hubs-go/v3/atom" - "github.com/Azure/azure-event-hubs-go/v3/persist" -) - -const ( - maxUserAgentLen = 128 - rootUserAgent = "/golang-event-hubs" -) - -type ( - // Hub provides the ability to send and receive Event Hub messages - Hub struct { - name string - namespace *namespace - receivers map[string]*receiver - sender *sender - senderPartitionID *string - senderRetryOptions *senderRetryOptions - receiverMu sync.Mutex - senderMu sync.Mutex - offsetPersister persist.CheckpointPersister - userAgent string - } - - // Handler is the function signature for any receiver of events - Handler func(ctx context.Context, event *Event) error - - // Sender provides the ability to send a messages - Sender interface { - Send(ctx context.Context, event *Event, opts ...SendOption) error - SendBatch(ctx context.Context, batch *EventBatch, opts ...SendOption) error - } - - // PartitionedReceiver provides the ability to receive messages from a given partition - PartitionedReceiver interface { - Receive(ctx context.Context, partitionID string, handler Handler, opts ...ReceiveOption) (ListenerHandle, error) - } - - // Manager provides the ability to query management node information about a node - Manager interface { - GetRuntimeInformation(context.Context) (HubRuntimeInformation, error) - GetPartitionInformation(context.Context, string) (HubPartitionRuntimeInformation, error) - } - - // HubOption provides structure for configuring new Event Hub clients. For building new Event Hubs, see - // HubManagementOption. 
- HubOption func(h *Hub) error - - // HubManager provides CRUD functionality for Event Hubs - HubManager struct { - *entityManager - } - - // HubEntity is the Azure Event Hub description of a Hub for management activities - HubEntity struct { - *HubDescription - Name string - } - - // hubFeed is a specialized feed containing hubEntries - hubFeed struct { - *atom.Feed - Entries []hubEntry `xml:"entry"` - } - - // hubEntry is a specialized Hub feed entry - hubEntry struct { - *atom.Entry - Content *hubContent `xml:"content"` - } - - // hubContent is a specialized Hub body for an Atom entry - hubContent struct { - XMLName xml.Name `xml:"content"` - Type string `xml:"type,attr"` - HubDescription HubDescription `xml:"EventHubDescription"` - } - - // HubDescription is the content type for Event Hub management requests - HubDescription struct { - XMLName xml.Name `xml:"EventHubDescription"` - MessageRetentionInDays *int32 `xml:"MessageRetentionInDays,omitempty"` - SizeInBytes *int64 `xml:"SizeInBytes,omitempty"` - Status *eventhub.EntityStatus `xml:"Status,omitempty"` - CreatedAt *date.Time `xml:"CreatedAt,omitempty"` - UpdatedAt *date.Time `xml:"UpdatedAt,omitempty"` - PartitionCount *int32 `xml:"PartitionCount,omitempty"` - PartitionIDs *[]string `xml:"PartitionIds>string,omitempty"` - EntityAvailabilityStatus *string `xml:"EntityAvailabilityStatus,omitempty"` - BaseEntityDescription - } - - // HubManagementOption provides structure for configuring new Event Hubs - HubManagementOption func(description *HubDescription) error -) - -// NewHubManagerFromConnectionString builds a HubManager from an Event Hub connection string -func NewHubManagerFromConnectionString(connStr string) (*HubManager, error) { - ns, err := newNamespace(namespaceWithConnectionString(connStr)) - if err != nil { - return nil, err - } - return &HubManager{ - entityManager: newEntityManager(ns.getHTTPSHostURI(), ns.tokenProvider), - }, nil -} - -// NewHubManagerFromAzureEnvironment builds a HubManager from a Event Hub name, SAS or AAD token provider and Azure Environment -func NewHubManagerFromAzureEnvironment(namespace string, tokenProvider auth.TokenProvider, env azure.Environment) (*HubManager, error) { - ns, err := newNamespace(namespaceWithAzureEnvironment(namespace, tokenProvider, env)) - if err != nil { - return nil, err - } - return &HubManager{ - entityManager: newEntityManager(ns.getHTTPSHostURI(), ns.tokenProvider), - }, nil -} - -// Delete deletes an Event Hub entity by name -func (hm *HubManager) Delete(ctx context.Context, name string) error { - span, ctx := hm.startSpanFromContext(ctx, "eh.HubManager.Delete") - defer span.End() - - res, err := hm.entityManager.Delete(ctx, "/"+name) - if res != nil { - defer res.Body.Close() - } - - return err -} - -// HubWithMessageRetentionInDays configures an Event Hub to retain messages for that number of days -func HubWithMessageRetentionInDays(days int32) HubManagementOption { - return func(hd *HubDescription) error { - hd.MessageRetentionInDays = &days - return nil - } -} - -// HubWithPartitionCount configures an Event Hub to have the specified number of partitions. 
More partitions == more throughput -func HubWithPartitionCount(count int32) HubManagementOption { - return func(hd *HubDescription) error { - hd.PartitionCount = &count - return nil - } -} - -// Put creates or updates an Event Hubs Hub -func (hm *HubManager) Put(ctx context.Context, name string, opts ...HubManagementOption) (*HubEntity, error) { - span, ctx := hm.startSpanFromContext(ctx, "eh.HubManager.Put") - defer span.End() - - hd := new(HubDescription) - for _, opt := range opts { - if err := opt(hd); err != nil { - return nil, err - } - } - - hd.ServiceBusSchema = to.StringPtr(serviceBusSchema) - - he := &hubEntry{ - Entry: &atom.Entry{ - AtomSchema: atomSchema, - }, - Content: &hubContent{ - Type: applicationXML, - HubDescription: *hd, - }, - } - - reqBytes, err := xml.Marshal(he) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - reqBytes = xmlDoc(reqBytes) - res, err := hm.entityManager.Put(ctx, "/"+name, reqBytes) - if res != nil { - defer res.Body.Close() - } - - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - b, err := io.ReadAll(res.Body) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - var entry hubEntry - err = xml.Unmarshal(b, &entry) - if err != nil { - return nil, formatManagementError(b) - } - return hubEntryToEntity(&entry), nil -} - -// List fetches all of the Hub for an Event Hubs Namespace -func (hm *HubManager) List(ctx context.Context) ([]*HubEntity, error) { - span, ctx := hm.startSpanFromContext(ctx, "eh.HubManager.List") - defer span.End() - - res, err := hm.entityManager.Get(ctx, `/$Resources/EventHubs`) - if res != nil { - defer res.Body.Close() - } - - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - b, err := io.ReadAll(res.Body) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - var feed hubFeed - err = xml.Unmarshal(b, &feed) - if err != nil { - return nil, formatManagementError(b) - } - - qd := make([]*HubEntity, len(feed.Entries)) - for idx, entry := range feed.Entries { - qd[idx] = hubEntryToEntity(&entry) - } - return qd, nil -} - -// Get fetches an Event Hubs Hub entity by name -func (hm *HubManager) Get(ctx context.Context, name string) (*HubEntity, error) { - span, ctx := hm.startSpanFromContext(ctx, "eh.HubManager.Get") - defer span.End() - - res, err := hm.entityManager.Get(ctx, name) - if res != nil { - defer res.Body.Close() - } - - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - if res.StatusCode == http.StatusNotFound { - return nil, nil - } - - b, err := io.ReadAll(res.Body) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - var entry hubEntry - err = xml.Unmarshal(b, &entry) - if err != nil { - if isEmptyFeed(b) { - return nil, nil - } - return nil, formatManagementError(b) - } - - return hubEntryToEntity(&entry), nil -} - -func isEmptyFeed(b []byte) bool { - var emptyFeed hubFeed - feedErr := xml.Unmarshal(b, &emptyFeed) - return feedErr == nil && emptyFeed.Title == "Publicly Listed Services" -} - -func hubEntryToEntity(entry *hubEntry) *HubEntity { - return &HubEntity{ - HubDescription: &entry.Content.HubDescription, - Name: entry.Title, - } -} - -// NewHub creates a new Event Hub client for sending and receiving messages -// NOTE: If the AZURE_ENVIRONMENT variable is set, it will be used to set the ServiceBusEndpointSuffix -// from the corresponding azure.Environment type at the end of the namespace host string. The default -// is azure.PublicCloud. 
-func NewHub(namespace, name string, tokenProvider auth.TokenProvider, opts ...HubOption) (*Hub, error) {
-	env := azure.PublicCloud
-	if e := os.Getenv("AZURE_ENVIRONMENT"); e != "" {
-		var err error
-		env, err = azure.EnvironmentFromName(e)
-		if err != nil {
-			return nil, err
-		}
-	}
-	ns, err := newNamespace(namespaceWithAzureEnvironment(namespace, tokenProvider, env))
-	if err != nil {
-		return nil, err
-	}
-
-	h := &Hub{
-		name:               name,
-		namespace:          ns,
-		offsetPersister:    persist.NewMemoryPersister(),
-		userAgent:          rootUserAgent,
-		receivers:          make(map[string]*receiver),
-		senderRetryOptions: newSenderRetryOptions(),
-	}
-
-	for _, opt := range opts {
-		err := opt(h)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return h, nil
-}
-
-// NewHubWithNamespaceNameAndEnvironment creates a new Event Hub client for sending and receiving messages using the
-// supplied namespace and name. It will attempt to build a token provider from environment variables. If unable to
-// build an AAD Token Provider it will fall back to a SAS token provider. If neither can be built, it will return an
-// error.
-//
-// SAS TokenProvider environment variables:
-//
-// There are two sets of environment variables which can produce a SAS TokenProvider
-//
-// 1. Expected Environment Variables:
-//   - "EVENTHUB_KEY_NAME" the name of the Event Hub key
-//   - "EVENTHUB_KEY_VALUE" the secret for the Event Hub key named in "EVENTHUB_KEY_NAME"
-//
-// 2. Expected Environment Variable:
-//   - "EVENTHUB_CONNECTION_STRING" connection string from the Azure portal
-//
-// AAD TokenProvider environment variables:
-//
-// 1. client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and
-// "AZURE_CLIENT_SECRET"
-//
-// 2. client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID",
-// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD"
-//
-// 3. Managed Service Identity (MSI): attempt to authenticate via MSI on the default local MSI internally addressable IP
-// and port. See: adal.GetMSIVMEndpoint()
-//
-// The Azure Environment used can be specified using the name of the Azure Environment set in the AZURE_ENVIRONMENT var.
-func NewHubWithNamespaceNameAndEnvironment(namespace, name string, opts ...HubOption) (*Hub, error) {
-	var provider auth.TokenProvider
-	provider, sasErr := sas.NewTokenProvider(sas.TokenProviderWithEnvironmentVars())
-	if sasErr == nil {
-		return NewHub(namespace, name, provider, opts...)
-	}
-
-	provider, aadErr := aad.NewJWTProvider(aad.JWTProviderWithEnvironmentVars())
-	if aadErr == nil {
-		return NewHub(namespace, name, provider, opts...)
-	}
-
-	return nil, fmt.Errorf("neither Azure Active Directory nor SAS token provider could be built - AAD error: %v, SAS error: %v", aadErr, sasErr)
-}
-
-// NewHubFromEnvironment creates a new Event Hub client for sending and receiving messages from environment variables
-//
-// Expected Environment Variables:
-//   - "EVENTHUB_NAMESPACE" the namespace of the Event Hub instance
-//   - "EVENTHUB_NAME" the name of the Event Hub instance
-//
-// This method depends on NewHubWithNamespaceNameAndEnvironment which will attempt to build a token provider from
-// environment variables. If unable to build an AAD Token Provider it will fall back to a SAS token provider. If neither
-// can be built, it will return an error.
-// -// SAS TokenProvider environment variables: -// -// There are two sets of environment variables which can produce a SAS TokenProvider -// -// 1. Expected Environment Variables: -// - "EVENTHUB_NAMESPACE" the namespace of the Event Hub instance -// - "EVENTHUB_KEY_NAME" the name of the Event Hub key -// - "EVENTHUB_KEY_VALUE" the secret for the Event Hub key named in "EVENTHUB_KEY_NAME" -// -// 2. Expected Environment Variable: -// - "EVENTHUB_CONNECTION_STRING" connection string from the Azure portal -// -// AAD TokenProvider environment variables: -// -// 1. client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and -// "AZURE_CLIENT_SECRET" -// -// 2. client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID", -// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD" -// -// 3. Managed Service Identity (MSI): attempt to authenticate via MSI -// -// The Azure Environment used can be specified using the name of the Azure Environment set in the AZURE_ENVIRONMENT var. -func NewHubFromEnvironment(opts ...HubOption) (*Hub, error) { - const envErrMsg = "environment var %s must not be empty" - var namespace, name string - - if namespace = os.Getenv("EVENTHUB_NAMESPACE"); namespace == "" { - return nil, fmt.Errorf(envErrMsg, "EVENTHUB_NAMESPACE") - } - - if name = os.Getenv("EVENTHUB_NAME"); name == "" { - return nil, fmt.Errorf(envErrMsg, "EVENTHUB_NAME") - } - - return NewHubWithNamespaceNameAndEnvironment(namespace, name, opts...) -} - -// NewHubFromConnectionString creates a new Event Hub client for sending and receiving messages from a connection string -// formatted like the following: -// -// Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName -func NewHubFromConnectionString(connStr string, opts ...HubOption) (*Hub, error) { - parsed, err := conn.ParsedConnectionFromStr(connStr) - if err != nil { - return nil, err - } - - ns, err := newNamespace(namespaceWithConnectionString(connStr)) - if err != nil { - return nil, err - } - - h := &Hub{ - name: parsed.HubName, - namespace: ns, - offsetPersister: persist.NewMemoryPersister(), - userAgent: rootUserAgent, - receivers: make(map[string]*receiver), - senderRetryOptions: newSenderRetryOptions(), - } - - for _, opt := range opts { - err := opt(h) - if err != nil { - return nil, err - } - } - - return h, err -} - -// GetRuntimeInformation fetches runtime information from the Event Hub management node -func (h *Hub) GetRuntimeInformation(ctx context.Context) (*HubRuntimeInformation, error) { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.GetRuntimeInformation") - defer span.End() - client := newClient(h.namespace, h.name) - c, err := h.namespace.newConnection(ctx) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - defer func() { - if err := c.Close(); err != nil { - tab.For(ctx).Error(err) - } - }() - - info, err := client.GetHubRuntimeInformation(ctx, c) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - return info, nil -} - -// GetPartitionInformation fetches runtime information about a specific partition from the Event Hub management node -func (h *Hub) GetPartitionInformation(ctx context.Context, partitionID string) (*HubPartitionRuntimeInformation, error) { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.GetPartitionInformation") - defer span.End() - client := newClient(h.namespace, h.name) 
- c, err := h.namespace.newConnection(ctx) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - - defer func() { - if err := c.Close(); err != nil { - tab.For(ctx).Error(err) - } - }() - - info, err := client.GetHubPartitionRuntimeInformation(ctx, c, partitionID) - if err != nil { - return nil, err - } - - return info, nil -} - -// Close drains and closes all of the existing senders, receivers and connections -func (h *Hub) Close(ctx context.Context) error { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.Close") - defer span.End() - - if h.sender != nil { - if err := h.sender.Close(ctx); err != nil { - if rErr := h.closeReceivers(ctx); rErr != nil { - if !isConnectionClosed(rErr) { - tab.For(ctx).Error(rErr) - } - } - - if !isConnectionClosed(err) { - tab.For(ctx).Error(err) - return err - } - - return nil - } - } - - // close receivers and return error - err := h.closeReceivers(ctx) - if err != nil && !isConnectionClosed(err) { - tab.For(ctx).Error(err) - return err - } - - return nil -} - -// closeReceivers will close the receivers on the hub and return the last error -func (h *Hub) closeReceivers(ctx context.Context) error { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.closeReceivers") - defer span.End() - - var lastErr error - for _, r := range h.receivers { - if err := r.Close(ctx); err != nil { - tab.For(ctx).Error(err) - lastErr = err - } - } - return lastErr -} - -// Receive subscribes for messages sent to the provided entityPath. -// -// The context passed into Receive is only used to limit the amount of time the caller will wait for the Receive -// method to connect to the Event Hub. The context passed in does not control the lifetime of Receive after connection. -// -// If Receive encounters an initial error setting up the connection, an error will be returned. -// -// If Receive starts successfully, a *ListenerHandle and a nil error will be returned. The ListenerHandle exposes -// methods which will help manage the life span of the receiver. -// -// # ListenerHandle.Close(ctx) closes the receiver -// -// # ListenerHandle.Done() signals the consumer when the receiver has stopped -// -// ListenerHandle.Err() provides the last error the listener encountered and was unable to recover from -func (h *Hub) Receive(ctx context.Context, partitionID string, handler Handler, opts ...ReceiveOption) (*ListenerHandle, error) { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.Receive") - defer span.End() - - h.receiverMu.Lock() - defer h.receiverMu.Unlock() - - receiver, err := h.newReceiver(ctx, partitionID, opts...) - if err != nil { - return nil, err - } - - // Todo: change this to use name rather than identifier - if r, ok := h.receivers[receiver.getIdentifier()]; ok { - if err := r.Close(ctx); err != nil { - tab.For(ctx).Error(err) - } - } - - h.receivers[receiver.getIdentifier()] = receiver - listenerContext := receiver.Listen(handler) - - return listenerContext, nil -} - -// Send sends an event to the Event Hub -// -// Send will retry sending the message for as long as the context allows -func (h *Hub) Send(ctx context.Context, event *Event, opts ...SendOption) error { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.Send") - defer span.End() - - sender, err := h.getSender(ctx) - if err != nil { - return err - } - - return sender.Send(ctx, event, opts...) 
-} - -// SendBatch sends a batch of events to the Hub -func (h *Hub) SendBatch(ctx context.Context, iterator BatchIterator, opts ...BatchOption) error { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.SendBatch") - defer span.End() - - sender, err := h.getSender(ctx) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - batchOptions := &BatchOptions{ - MaxSize: DefaultMaxMessageSizeInBytes, - } - - for _, opt := range opts { - if err := opt(batchOptions); err != nil { - tab.For(ctx).Error(err) - return err - } - } - - for !iterator.Done() { - id, err := uuid.NewV4() - if err != nil { - tab.For(ctx).Error(err) - return err - } - - batch, err := iterator.Next(id.String(), batchOptions) - - if err != nil { - tab.For(ctx).Error(err) - return err - } - - if err := sender.trySend(ctx, batch); err != nil { - tab.For(ctx).Error(err) - return err - } - } - - return nil -} - -// HubWithPartitionedSender configures the Hub instance to send to a specific event Hub partition -func HubWithPartitionedSender(partitionID string) HubOption { - return func(h *Hub) error { - h.senderPartitionID = &partitionID - return nil - } -} - -// HubWithOffsetPersistence configures the Hub instance to read and write offsets so that if a Hub is interrupted, it -// can resume after the last consumed event. -func HubWithOffsetPersistence(offsetPersister persist.CheckpointPersister) HubOption { - return func(h *Hub) error { - h.offsetPersister = offsetPersister - return nil - } -} - -// HubWithUserAgent configures the Hub to append the given string to the user agent sent to the server -// -// This option can be specified multiple times to add additional segments. -// -// Max user agent length is specified by the const maxUserAgentLen. -func HubWithUserAgent(userAgent string) HubOption { - return func(h *Hub) error { - return h.appendAgent(userAgent) - } -} - -// HubWithEnvironment configures the Hub to use the specified environment. -// -// By default, the Hub instance will use Azure US Public cloud environment -func HubWithEnvironment(env azure.Environment) HubOption { - return func(h *Hub) error { - h.namespace.host = "amqps://" + h.namespace.name + "." + env.ServiceBusEndpointSuffix - return nil - } -} - -// HubWithWebSocketConnection configures the Hub to use a WebSocket connection wss:// rather than amqps:// -func HubWithWebSocketConnection() HubOption { - return func(h *Hub) error { - h.namespace.useWebSocket = true - return nil - } -} - -// HubWithSenderMaxRetryCount configures the Hub to retry sending messages `maxRetryCount` times, -// in addition to the original attempt. -// 0 indicates no retries, and < 0 will cause infinite retries. 
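-//
-// For example, to allow at most five retries after the first attempt (a sketch;
-// any NewHub* constructor accepts this option, and connStr is a placeholder):
-//
-//	hub, err := eventhub.NewHubFromConnectionString(connStr,
-//		eventhub.HubWithSenderMaxRetryCount(5))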
-func HubWithSenderMaxRetryCount(maxRetryCount int) HubOption { - return func(h *Hub) error { - h.senderRetryOptions.maxRetries = maxRetryCount - return nil - } -} - -func (h *Hub) appendAgent(userAgent string) error { - ua := path.Join(h.userAgent, userAgent) - if len(ua) > maxUserAgentLen { - return fmt.Errorf("user agent string has surpassed the max length of %d", maxUserAgentLen) - } - h.userAgent = ua - return nil -} - -func (h *Hub) getSender(ctx context.Context) (*sender, error) { - h.senderMu.Lock() - defer h.senderMu.Unlock() - - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.getSender") - defer span.End() - - if h.sender == nil { - s, err := h.newSender(ctx, h.senderRetryOptions) - if err != nil { - tab.For(ctx).Error(err) - return nil, err - } - h.sender = s - } - return h.sender, nil -} - -func isRecoverableCloseError(err error) bool { - var linkError *amqp.LinkError - // an *amqp.LinkError with a nil RemoteErr means that the link was closed client-side - return isConnectionClosed(err) || isSessionClosed(err) || (errors.As(err, &linkError) && linkError.RemoteErr != nil) -} - -func isConnectionClosed(err error) bool { - var connErr *amqp.ConnError - return errors.As(err, &connErr) -} - -func isSessionClosed(err error) bool { - var sessionErr *amqp.SessionError - return errors.As(err, &sessionErr) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/namespace.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/namespace.go deleted file mode 100644 index eed0b0765e8..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/namespace.go +++ /dev/null @@ -1,143 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "context" - "runtime" - "strings" - - "github.com/Azure/azure-amqp-common-go/v4/auth" - "github.com/Azure/azure-amqp-common-go/v4/cbs" - "github.com/Azure/azure-amqp-common-go/v4/conn" - "github.com/Azure/azure-amqp-common-go/v4/sas" - "github.com/Azure/go-amqp" - "github.com/Azure/go-autorest/autorest/azure" - "golang.org/x/net/websocket" -) - -type ( - namespace struct { - name string - tokenProvider auth.TokenProvider - host string - useWebSocket bool - } - - // namespaceOption provides structure for configuring a new Event Hub namespace - namespaceOption func(h *namespace) error -) - -// newNamespaceWithConnectionString configures a namespace with the information provided in a Service Bus connection string -func namespaceWithConnectionString(connStr string) namespaceOption { - return func(ns *namespace) error { - parsed, err := conn.ParsedConnectionFromStr(connStr) - if err != nil { - return err - } - ns.name = parsed.Namespace - ns.host = parsed.Host - provider, err := sas.NewTokenProvider(sas.TokenProviderWithKey(parsed.KeyName, parsed.Key)) - if err != nil { - return err - } - ns.tokenProvider = provider - return nil - } -} - -func namespaceWithAzureEnvironment(name string, tokenProvider auth.TokenProvider, env azure.Environment) namespaceOption { - return func(ns *namespace) error { - ns.name = name - ns.tokenProvider = tokenProvider - ns.host = "amqps://" + ns.name + "." + env.ServiceBusEndpointSuffix - return nil - } -} - -// newNamespace creates a new namespace configured through NamespaceOption(s) -func newNamespace(opts ...namespaceOption) (*namespace, error) { - ns := &namespace{} - - for _, opt := range opts { - err := opt(ns) - if err != nil { - return nil, err - } - } - - return ns, nil -} - -func (ns *namespace) newConnection(ctx context.Context) (*amqp.Conn, error) { - host := ns.getAmqpsHostURI() - - defaultConnOptions := amqp.ConnOptions{ - Properties: map[string]any{ - "product": "MSGolangClient", - "version": Version, - "platform": runtime.GOOS, - "framework": runtime.Version(), - "user-agent": rootUserAgent, - }, - SASLType: amqp.SASLTypeAnonymous(), - } - - if ns.useWebSocket { - trimmedHost := strings.TrimPrefix(ns.host, "amqps://") - wssConn, err := websocket.Dial("wss://"+trimmedHost+"/$servicebus/websocket", "amqp", "http://localhost/") - if err != nil { - return nil, err - } - - wssConn.PayloadType = websocket.BinaryFrame - defaultConnOptions.HostName = trimmedHost - return amqp.NewConn(ctx, wssConn, &defaultConnOptions) - } - - return amqp.Dial(ctx, host, &defaultConnOptions) -} - -func (ns *namespace) negotiateClaim(ctx context.Context, conn *amqp.Conn, entityPath string) error { - span, ctx := ns.startSpanFromContext(ctx, "eh.namespace.negotiateClaim") - defer span.End() - - audience := ns.getEntityAudience(entityPath) - return cbs.NegotiateClaim(ctx, audience, conn, ns.tokenProvider) -} - -func (ns *namespace) getAmqpsHostURI() string { - return ns.host + "/" -} - -func (ns *namespace) getAmqpHostURI() string { - return strings.Replace(ns.getAmqpsHostURI(), "amqps", "amqp", 1) -} - -func (ns *namespace) getEntityAudience(entityPath string) string { - return ns.getAmqpsHostURI() + entityPath -} - -func (ns *namespace) getHTTPSHostURI() string { - return 
strings.Replace(ns.getAmqpsHostURI(), "amqps", "https", 1) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/checkpoint.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/checkpoint.go deleted file mode 100644 index aa0766f0aef..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/checkpoint.go +++ /dev/null @@ -1,69 +0,0 @@ -package persist - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "time" -) - -const ( - // StartOfStream is a constant defined to represent the start of a partition stream in EventHub. - StartOfStream = "-1" - - // EndOfStream is a constant defined to represent the current end of a partition stream in EventHub. - // This can be used as an offset argument in receiver creation to start receiving from the latest - // event, instead of a specific offset or point in time. - EndOfStream = "@latest" -) - -type ( - // Checkpoint is the information needed to determine the last message processed - Checkpoint struct { - Offset string `json:"offset"` - SequenceNumber int64 `json:"sequenceNumber"` - EnqueueTime time.Time `json:"enqueueTime"` - } -) - -// NewCheckpointFromStartOfStream returns a checkpoint for the start of the stream -func NewCheckpointFromStartOfStream() Checkpoint { - return Checkpoint{ - Offset: StartOfStream, - } -} - -// NewCheckpointFromEndOfStream returns a checkpoint for the end of the stream -func NewCheckpointFromEndOfStream() Checkpoint { - return Checkpoint{ - Offset: EndOfStream, - } -} - -// NewCheckpoint contains the information needed to checkpoint Event Hub progress -func NewCheckpoint(offset string, sequence int64, enqueueTime time.Time) Checkpoint { - return Checkpoint{ - Offset: offset, - SequenceNumber: sequence, - EnqueueTime: enqueueTime, - } -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/file.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/file.go deleted file mode 100644 index 501a82aabe8..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/file.go +++ /dev/null @@ -1,100 +0,0 @@ -package persist - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE - -import ( - "bytes" - "encoding/json" - "io" - "os" - "path" - "strings" - "sync" -) - -type ( - // FilePersister implements CheckpointPersister for saving to the file system - FilePersister struct { - directory string - mu sync.Mutex - } -) - -// NewFilePersister creates a FilePersister for saving to a given directory -func NewFilePersister(directory string) (*FilePersister, error) { - err := os.MkdirAll(directory, 0777) - return &FilePersister{ - directory: directory, - }, err -} - -func (fp *FilePersister) Write(namespace, name, consumerGroup, partitionID string, checkpoint Checkpoint) error { - fp.mu.Lock() - defer fp.mu.Unlock() - - key := getFilePath(namespace, name, consumerGroup, partitionID) - filePath := path.Join(fp.directory, key) - bits, err := json.Marshal(checkpoint) - if err != nil { - return err - } - - file, err := os.Create(filePath) - if err != nil { - return err - } - _, err = file.Write(bits) - if err != nil { - return err - } - - return file.Close() -} - -func (fp *FilePersister) Read(namespace, name, consumerGroup, partitionID string) (Checkpoint, error) { - fp.mu.Lock() - defer fp.mu.Unlock() - - key := getFilePath(namespace, name, consumerGroup, partitionID) - filePath := path.Join(fp.directory, key) - - f, err := os.Open(filePath) - if err != nil { - return NewCheckpointFromStartOfStream(), err - } - - buf := bytes.NewBuffer(nil) - _, err = io.Copy(buf, f) - if err != nil { - return NewCheckpointFromStartOfStream(), err - } - - var checkpoint Checkpoint - err = json.Unmarshal(buf.Bytes(), &checkpoint) - return checkpoint, err -} - -func getFilePath(namespace, name, consumerGroup, partitionID string) string { - key := strings.Join([]string{namespace, name, consumerGroup, partitionID}, "_") - return strings.Replace(key, "$", "", -1) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/persist.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/persist.go deleted file mode 100644 index 5422cde01aa..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/persist/persist.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package persist provides abstract structures for checkpoint persistence. 
-package persist
-
-import (
-	"path"
-	"sync"
-)
-
-type (
-	// CheckpointPersister provides persistence for the received offset for a given namespace, hub name, consumer group, partition Id and
-	// offset so that if a receiver were to be interrupted, it could resume after the last consumed event.
-	CheckpointPersister interface {
-		Write(namespace, name, consumerGroup, partitionID string, checkpoint Checkpoint) error
-		Read(namespace, name, consumerGroup, partitionID string) (Checkpoint, error)
-	}
-
-	// MemoryPersister is a default implementation of a Hub CheckpointPersister, which will persist offset information in
-	// memory.
-	MemoryPersister struct {
-		values map[string]Checkpoint
-		mu     sync.Mutex
-	}
-)
-
-// NewMemoryPersister creates a new in-memory storage for checkpoints
-//
-// MemoryPersister is only intended to be shared with EventProcessorHosts within the same process. This implementation
-// is a toy. You should probably use the Azure Storage implementation or any other that provides durable storage for
-// checkpoints.
-func NewMemoryPersister() *MemoryPersister {
-	return &MemoryPersister{
-		values: make(map[string]Checkpoint),
-	}
-}
-
-func (p *MemoryPersister) Write(namespace, name, consumerGroup, partitionID string, checkpoint Checkpoint) error {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	key := getPersistenceKey(namespace, name, consumerGroup, partitionID)
-	p.values[key] = checkpoint
-	return nil
-}
-
-func (p *MemoryPersister) Read(namespace, name, consumerGroup, partitionID string) (Checkpoint, error) {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-
-	key := getPersistenceKey(namespace, name, consumerGroup, partitionID)
-	if offset, ok := p.values[key]; ok {
-		return offset, nil
-	}
-	return NewCheckpointFromStartOfStream(), nil
-}
-
-func getPersistenceKey(namespace, name, consumerGroup, partitionID string) string {
-	return path.Join(namespace, name, consumerGroup, partitionID)
-}
diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/readme.md b/vendor/github.com/Azure/azure-event-hubs-go/v3/readme.md
deleted file mode 100644
index 3cd75d8d27e..00000000000
--- a/vendor/github.com/Azure/azure-event-hubs-go/v3/readme.md
+++ /dev/null
@@ -1,474 +0,0 @@
-**Please note, a newer package is available: [azeventhubs](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/README.md) as of [2023-05-09].**
-**We strongly encourage you to upgrade. See the [Migration Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/migrationguide.md) for more details.**
-
-# Microsoft Azure Event Hubs Client for Golang
-[![Go Report Card](https://goreportcard.com/badge/github.com/Azure/azure-event-hubs-go)](https://goreportcard.com/report/github.com/Azure/azure-event-hubs-go)
-[![godoc](https://godoc.org/github.com/Azure/azure-event-hubs-go?status.svg)](https://godoc.org/github.com/Azure/azure-event-hubs-go)
-[![Build Status](https://travis-ci.org/Azure/azure-event-hubs-go.svg?branch=master)](https://travis-ci.org/Azure/azure-event-hubs-go)
-[![Coverage Status](https://coveralls.io/repos/github/Azure/azure-event-hubs-go/badge.svg?branch=master)](https://coveralls.io/github/Azure/azure-event-hubs-go?branch=master)
-
-Azure Event Hubs is a highly scalable publish-subscribe service that can ingest millions of events per second and
-stream them into multiple applications. This lets you process and analyze the massive amounts of data produced by your
-connected devices and applications.
Once Event Hubs has collected the data, you can retrieve, transform and store it by -using any real-time analytics provider or with batching/storage adapters. - -Refer to the [online documentation](https://azure.microsoft.com/services/event-hubs/) to learn more about Event Hubs in -general. - -This library is a pure Golang implementation of Azure Event Hubs over AMQP. - -## Install with Go modules -If you want to use stable versions of the library, please use Go modules. - -**NOTE**: versions prior to 3.0.0 depend on pack.ag/amqp which is no longer maintained. Any new code should not use versions prior to 3.0.0. - -### Using go get targeting version 3.x.x -``` bash -go get -u github.com/Azure/azure-event-hubs-go/v3 -``` - -### Using go get targeting version 2.x.x -``` bash -go get -u github.com/Azure/azure-event-hubs-go/v2 -``` - -### Using go get targeting version 1.x.x -``` bash -go get -u github.com/Azure/azure-event-hubs-go -``` - -## Using Event Hubs -In this section we'll cover some basics of the library to help you get started. - -This library has two main dependencies, [vcabbage/amqp](https://github.com/vcabbage/amqp) and -[Azure AMQP Common](https://github.com/Azure/azure-amqp-common-go). The former provides the AMQP protocol implementation -and the latter provides some common authentication, persistence and request-response message flows. - -### Quick start -Let's send and receive `"hello, world!"` to all the partitions in an Event Hub. -```go -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "time" - - "github.com/Azure/azure-event-hubs-go/v3" -) - -func main() { - connStr := "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" - hub, err := eventhub.NewHubFromConnectionString(connStr) - - if err != nil { - fmt.Println(err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - // send a single message into a random partition - err = hub.Send(ctx, eventhub.NewEventFromString("hello, world!")) - if err != nil { - fmt.Println(err) - return - } - - handler := func(c context.Context, event *eventhub.Event) error { - fmt.Println(string(event.Data)) - return nil - } - - // listen to each partition of the Event Hub - runtimeInfo, err := hub.GetRuntimeInformation(ctx) - if err != nil { - fmt.Println(err) - return - } - - for _, partitionID := range runtimeInfo.PartitionIDs { - // Start receiving messages - // - // Receive blocks while attempting to connect to hub, then runs until listenerHandle.Close() is called - // <- listenerHandle.Done() signals listener has stopped - // listenerHandle.Err() provides the last error the receiver encountered - listenerHandle, err := hub.Receive(ctx, partitionID, handler) - if err != nil { - fmt.Println(err) - return - } - } - - // Wait for a signal to quit: - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt, os.Kill) - <-signalChan - - err = hub.Close(context.Background()) - if err != nil { - fmt.Println(err) - } -} -``` - -### Environment Variables -In the above example, the `Hub` instance was created using environment variables. Here is a list of environment -variables used in this project. 
-
-#### Event Hub env vars
-- `EVENTHUB_NAMESPACE` the namespace of the Event Hub instance
-- `EVENTHUB_NAME` the name of the Event Hub instance
-
-#### SAS TokenProvider environment variables:
-There are two sets of environment variables which can produce a SAS TokenProvider
-1) Expected Environment Variables:
-   - `EVENTHUB_KEY_NAME` the name of the Event Hub key
-   - `EVENTHUB_KEY_VALUE` the secret for the Event Hub key named in `EVENTHUB_KEY_NAME`
-
-2) Expected Environment Variable:
-   - `EVENTHUB_CONNECTION_STRING` connection string from the Azure portal like: `Endpoint=sb://foo.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=fluffypuppy;EntityPath=hubName`
-
-#### AAD TokenProvider environment variables:
-1) Client Credentials: attempt to authenticate with a [Service Principal](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal) via
-   - `AZURE_TENANT_ID` the Azure Tenant ID
-   - `AZURE_CLIENT_ID` the Azure Application ID
-   - `AZURE_CLIENT_SECRET` a key / secret for the corresponding application
-2) Client Certificate: attempt to authenticate with a Service Principal via
-   - `AZURE_TENANT_ID` the Azure Tenant ID
-   - `AZURE_CLIENT_ID` the Azure Application ID
-   - `AZURE_CERTIFICATE_PATH` the path to the certificate file
-   - `AZURE_CERTIFICATE_PASSWORD` the password for the certificate
-
-The Azure Environment used can be specified using the name of the Azure Environment set in "AZURE_ENVIRONMENT" var.
-
-### Authentication
-Event Hubs offers a couple different paths for authentication, shared access signatures (SAS) and Azure Active Directory (AAD)
-JWT authentication. Both token types are available for use and are exposed through the `TokenProvider` interface.
-```go
-// TokenProvider abstracts the fetching of authentication tokens
-TokenProvider interface {
-	GetToken(uri string) (*Token, error)
-}
-```
-
-#### SAS token provider
-The SAS token provider uses the namespace of the Event Hub, the name of the "Shared access policy" key and the value of
-the key to produce a token.
-
-You can create new Shared access policies through the Azure portal as shown below.
-![SAS policies in the Azure portal](./_content/sas-policy.png)
-
-You can create a SAS token provider in a couple different ways. You can build one with a key name and key value like
-this.
-```go
-provider, err := sas.NewTokenProvider(sas.TokenProviderWithKey("myKeyName", "myKeyValue"))
-```
-
-Or, you can create a token provider from environment variables like this.
-```go
-// TokenProviderWithEnvironmentVars creates a new SAS TokenProvider from environment variables
-//
-// There are two sets of environment variables which can produce a SAS TokenProvider
-//
-// 1) Expected Environment Variables:
-//   - "EVENTHUB_KEY_NAME" the name of the Event Hub key
-//   - "EVENTHUB_KEY_VALUE" the secret for the Event Hub key named in "EVENTHUB_KEY_NAME"
-//
-// 2) Expected Environment Variable:
-//   - "EVENTHUB_CONNECTION_STRING" connection string from the Azure portal
-
-provider, err := sas.NewTokenProvider(sas.TokenProviderWithEnvironmentVars())
-```
-
-#### AAD JWT token provider
-The AAD JWT token provider uses Azure Active Directory to authenticate the service and acquire a token (JWT) which is
-used to authenticate with Event Hubs. The authenticated identity must have `Contributor` role based authorization for
-the Event Hub instance. [This article](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-role-based-access-control)
-provides more information about this preview feature.
-
-The easiest way to create a JWT token provider is via environment variables.
-```go
-// 1. Client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and
-// "AZURE_CLIENT_SECRET"
-//
-// 2. Client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID",
-// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD"
-//
-// 3. Managed Service Identity (MSI): attempt to authenticate via MSI
-//
-//
-// The Azure Environment used can be specified using the name of the Azure Environment set in "AZURE_ENVIRONMENT" var.
-provider, err := aad.NewJWTProvider(aad.JWTProviderWithEnvironmentVars())
-```
-
-You can also provide your own `adal.ServicePrincipalToken`.
-```go
-config := &aad.TokenProviderConfiguration{
-	ResourceURI: azure.PublicCloud.ResourceManagerEndpoint,
-	Env:         &azure.PublicCloud,
-}
-
-spToken, err := config.NewServicePrincipalToken()
-if err != nil {
-	// handle err
-}
-provider, err := aad.NewJWTProvider(aad.JWTProviderWithAADToken(spToken))
-```
-
-### Send And Receive
-The basics of messaging are sending and receiving messages. Here are the different ways you can do that.
-
-#### Sending to a particular partition
-By default, a Hub will send messages to any of the load-balanced partitions. Sometimes you want to send to only a
-particular partition. You can do this in two ways.
-1) You can supply a partition key on an event
-   ```go
-   event := eventhub.NewEventFromString("foo")
-   event.PartitionKey = "bazz"
-   hub.Send(ctx, event) // send event to the partition ID to which partition key hashes
-   ```
-2) You can build a hub instance that will only send to one partition.
-   ```go
-   partitionID := "0"
-   hub, err := eventhub.NewHubFromEnvironment(eventhub.HubWithPartitionedSender(partitionID))
-   ```
-
-#### Sending batches of events
-Sending a batch of messages is more efficient than sending a single message. `SendBatch` takes an `*EventBatchIterator` that will automatically create batches from a slice of `*Event`.
-```go
-import (
-	eventhub "github.com/Azure/azure-event-hubs-go/v3"
-)
-...
-var events []*eventhub.Event
-events = append(events, eventhub.NewEventFromString("one"))
-events = append(events, eventhub.NewEventFromString("two"))
-events = append(events, eventhub.NewEventFromString("three"))
-
-err := client.SendBatch(ctx, eventhub.NewEventBatchIterator(events...))
-```
-
-#### Controlling retries for sends
-By default, a Hub will retry sending messages forever if the errors that occur are retryable (for instance, network timeouts). You can control the number of retries using the `HubWithSenderMaxRetryCount` option when constructing your Hub client. For instance, to limit the number of retries to 5:
-
-```go
-// NOTE: you can use any 'NewHub*' method.
-eventhub.NewHubFromConnectionString("", eventhub.HubWithSenderMaxRetryCount(5))
-```
-
-#### Receiving
-When receiving messages from an Event Hub, you always need to specify the partition you'd like to receive from.
-`Hub.Receive` is a non-blocking call, which takes a message handler func and options. Since Event Hub is just a long
-log of messages, you also have to tell it where to start from. By default, a receiver will start from the beginning
-of the log, but there are options to help you specify your starting offset.
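-
-In addition to the offset-based options shown below, a receiver can also start from a point
-in time via `ReceiveFromTimestamp`. A short sketch, assuming `hub`, `ctx`, `partitionID` and
-`handler` are set up as in the earlier examples:
-```go
-// receive only events enqueued after a point in time (here: one hour ago)
-handle, err := hub.Receive(ctx, partitionID, handler,
-    eventhub.ReceiveFromTimestamp(time.Now().Add(-1*time.Hour)))
-```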
-
-The `Receive` func returns a handle to the running receiver and an error. If error is returned, the receiver was unable
-to start. If error is nil, the receiver is running and can be stopped by calling `Close` on the `Hub` or the handle
-returned.
-
-- Receive messages from a partition from the beginning of the log
-  ```go
-  handle, err := hub.Receive(ctx, partitionID, func(ctx context.Context, event *eventhub.Event) error {
-    // do stuff
-  })
-  ```
-- Receive from the latest message onward
-  ```go
-  handle, err := hub.Receive(ctx, partitionID, handler, eventhub.ReceiveWithLatestOffset())
-  ```
-- Receive from a specified offset
-  ```go
-  handle, err := hub.Receive(ctx, partitionID, handler, eventhub.ReceiveWithStartingOffset(offset))
-  ```
-
-At some point, a receiver process is going to stop. You will likely want it to start back up at the spot that it stopped
-processing messages. This is where message offsets can be used to start from where you have left off.
-
-The `Hub` struct can be customized to use a `persist.CheckpointPersister`. By default, a `Hub` uses an in-memory
-`CheckpointPersister`, but accepts anything that implements the `persist.CheckpointPersister` interface.
-
-```go
-// CheckpointPersister provides persistence for the received offset for a given namespace, hub name, consumer group, partition Id and
-// offset so that if a receiver were to be interrupted, it could resume after the last consumed event.
-CheckpointPersister interface {
-	Write(namespace, name, consumerGroup, partitionID string, checkpoint Checkpoint) error
-	Read(namespace, name, consumerGroup, partitionID string) (Checkpoint, error)
-}
-```
-
-For example, you could use the `persist.FilePersister` to save your checkpoints to a directory.
-```go
-persister, err := persist.NewFilePersister(directoryPath)
-if err != nil {
-	// handle err
-}
-hub, err := eventhub.NewHubFromEnvironment(eventhub.HubWithOffsetPersistence(persister))
-```
-
-## Event Processor Host
-The key to scale for Event Hubs is the idea of partitioned consumers. In contrast to the
-[competing consumers pattern](https://docs.microsoft.com/en-us/previous-versions/msp-n-p/dn568101(v=pandp.10)),
-the partitioned consumer pattern enables high scale by removing the contention bottleneck and facilitating end-to-end
-parallelism.
-
-The Event Processor Host (EPH) is an intelligent consumer agent that simplifies the management of checkpointing,
-leasing, and parallel event readers. EPH is intended to be run across multiple processes and machines while load
-balancing message consumers. A message consumer in EPH will take a lease on a partition, begin processing messages and
-periodically write a checkpoint to a persistent store. If at any time a new EPH process is added or lost, the remaining
-processors will balance the existing leases amongst the set of EPH processes.
-
-The default implementation of partition leasing and checkpointing is based on Azure Storage. Below is an example using
-EPH to start listening to all of the partitions of an Event Hub and print the messages received.
- -### Receiving Events - -```go -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "time" - - "github.com/Azure/azure-amqp-common-go/v4/conn" - "github.com/Azure/azure-amqp-common-go/v4/sas" - "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Azure/azure-event-hubs-go/v3/eph" - "github.com/Azure/azure-event-hubs-go/v3/storage" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/go-autorest/autorest/azure" -) - -func main() { - // Azure Storage account information - storageAccountName := "mystorageaccount" - storageAccountKey := "Zm9vCg==" - // Azure Storage container to store leases and checkpoints - storageContainerName := "ephcontainer" - - // Azure Event Hub connection string - eventHubConnStr := "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" - parsed, err := conn.ParsedConnectionFromStr(eventHubConnStr) - if err != nil { - // handle error - } - - // create a new Azure Storage Leaser / Checkpointer - cred, err := azblob.NewSharedKeyCredential(storageAccountName, storageAccountKey) - if err != nil { - fmt.Println(err) - return - } - - leaserCheckpointer, err := storage.NewStorageLeaserCheckpointer(cred, storageAccountName, storageContainerName, azure.PublicCloud) - if err != nil { - fmt.Println(err) - return - } - - // SAS token provider for Azure Event Hubs - provider, err := sas.NewTokenProvider(sas.TokenProviderWithKey(parsed.KeyName, parsed.Key)) - if err != nil { - fmt.Println(err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - // create a new EPH processor - processor, err := eph.New(ctx, parsed.Namespace, parsed.HubName, provider, leaserCheckpointer, leaserCheckpointer) - if err != nil { - fmt.Println(err) - return - } - - // register a message handler -- many can be registered - handlerID, err := processor.RegisterHandler(ctx, - func(c context.Context, e *eventhub.Event) error { - fmt.Println(string(e.Data)) - return nil - }) - if err != nil { - fmt.Println(err) - return - } - - fmt.Printf("handler id: %q is running\n", handlerID) - - // unregister a handler to stop that handler from receiving events - // processor.UnregisterHandler(ctx, handleID) - - // start handling messages from all of the partitions balancing across multiple consumers - err = processor.StartNonBlocking(ctx) - if err != nil { - fmt.Println(err) - return - } - - // Wait for a signal to quit: - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt, os.Kill) - <-signalChan - - err = processor.Close(context.Background()) - if err != nil { - fmt.Println(err) - return - } -} -``` - -## Examples -- [HelloWorld: Producer and Consumer](./_examples/helloworld): an example of sending and receiving messages from an -Event Hub instance. -- [Batch Processing](./_examples/batchprocessing): an example of handling events in batches - -# Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. 
You will only need to do this once across all repos using our CLA.
-
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
-contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-
-See [CONTRIBUTING.md](./.github/CONTRIBUTING.md).
-
-## Running Tests
-To set up the integration test environment, ensure the following prerequisites are in place
-- [install WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (if on Windows)
-- [install golang](https://golang.org/doc/install)
-- add paths to .profile
-  - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin
-  - export GOPATH=$HOME/go
-- install go dev dependencies
-  - run `go get github.com/fzipp/gocyclo`
-  - run `go get -u golang.org/x/lint/golint`
-- run the following bash commands
-  - `sudo apt install jq`
-- install gcc
-  - on Ubuntu:
-    - `sudo apt update`
-    - `sudo apt install build-essential`
-- [download terraform](https://www.terraform.io/downloads.html) and add to the path
-- install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-apt?view=azure-cli-latest)
-- run `az login`
-
-To run all tests, run `make test`
-
-To clean up dev tools in `go.mod` and `go.sum` prior to check-in run `make tidy` or `go mod tidy`
-
-# License
-
-MIT, see [LICENSE](./LICENSE).
diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/receiver.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/receiver.go
deleted file mode 100644
index 38be01f9d4d..00000000000
--- a/vendor/github.com/Azure/azure-event-hubs-go/v3/receiver.go
+++ /dev/null
@@ -1,498 +0,0 @@
-package eventhub
-
-// MIT License
-//
-// Copyright (c) Microsoft Corporation. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	common "github.com/Azure/azure-amqp-common-go/v4"
-	"github.com/Azure/go-amqp"
-	"github.com/devigned/tab"
-
-	"github.com/Azure/azure-event-hubs-go/v3/persist"
-)
-
-const (
-	// DefaultConsumerGroup is the default name for an event stream consumer group
-	DefaultConsumerGroup = "$Default"
-
-	offsetAnnotationName       = "x-opt-offset"
-	enqueuedTimeAnnotationName = "x-opt-enqueued-time"
-
-	amqpAnnotationFormat = "amqp.annotation.%s >%s '%v'"
-
-	defaultPrefetchCount = 1000
-
-	epochKey = MsftVendor + ":epoch"
-)
-
-// receiver provides session and link handling for a receiving entity path
-type (
-	receiver struct {
-		hub           *Hub
-		connection    *amqp.Conn
-		session       *session
-		receiver      *amqp.Receiver
-		consumerGroup string
-		partitionID   string
-		prefetchCount uint32
-		done          func()
-		epoch         *int64
-		lastError     error
-		checkpoint    persist.Checkpoint
-	}
-
-	// ReceiveOption provides a structure for configuring receivers
-	ReceiveOption func(receiver *receiver) error
-
-	// ListenerHandle provides the ability to close or listen to the close of a Receiver
-	ListenerHandle struct {
-		r   *receiver
-		ctx context.Context
-	}
-)
-
-// ReceiveWithConsumerGroup configures the receiver to listen to a specific consumer group
-func ReceiveWithConsumerGroup(consumerGroup string) ReceiveOption {
-	return func(receiver *receiver) error {
-		receiver.consumerGroup = consumerGroup
-		return nil
-	}
-}
-
-// ReceiveWithStartingOffset configures the receiver to start at a given position in the event stream
-func ReceiveWithStartingOffset(offset string) ReceiveOption {
-	return func(receiver *receiver) error {
-		receiver.checkpoint = persist.NewCheckpoint(offset, 0, time.Time{})
-		return nil
-	}
-}
-
-// ReceiveWithLatestOffset configures the receiver to start from the latest event in the stream
-func ReceiveWithLatestOffset() ReceiveOption {
-	return func(receiver *receiver) error {
-		receiver.checkpoint = persist.NewCheckpointFromEndOfStream()
-		return nil
-	}
-}
-
-// ReceiveFromTimestamp configures the receiver to start receiving from a specific point in time in the event stream
-func ReceiveFromTimestamp(t time.Time) ReceiveOption {
-	return func(receiver *receiver) error {
-		receiver.checkpoint = persist.NewCheckpoint("", 0, t)
-		return nil
-	}
-}
-
-// ReceiveWithPrefetchCount configures the receiver to attempt to fetch as many messages as the prefetch amount
-func ReceiveWithPrefetchCount(prefetch uint32) ReceiveOption {
-	return func(receiver *receiver) error {
-		receiver.prefetchCount = prefetch
-		return nil
-	}
-}
-
-// ReceiveWithEpoch configures the receiver to use an epoch. Specifying an epoch for a receiver will cause any receiver
-// with a lower epoch value to be disconnected from the message broker. If a receiver attempts to start with a lower
-// epoch than the broker currently knows for a given partition, the broker will respond with an error on initiation of
-// the receive request.
-//
-// Ownership enforcement: Once you have created an epoch-based receiver, you cannot create a non-epoch receiver to the same
-// consumer group / partition combo until all receivers to the combo are closed.
-// -// Ownership stealing: If a receiver with higher epoch value is created for a consumer group / partition combo, any -// older epoch receiver to that combo will be force closed. -func ReceiveWithEpoch(epoch int64) ReceiveOption { - return func(receiver *receiver) error { - receiver.epoch = &epoch - return nil - } -} - -// newReceiver creates a new Service Bus message listener given an AMQP client and an entity path -func (h *Hub) newReceiver(ctx context.Context, partitionID string, opts ...ReceiveOption) (*receiver, error) { - span, ctx := h.startSpanFromContext(ctx, "eh.Hub.newReceiver") - defer span.End() - - receiver := &receiver{ - hub: h, - consumerGroup: DefaultConsumerGroup, - prefetchCount: defaultPrefetchCount, - partitionID: partitionID, - } - - // apply options after fetching the persisted checkpoint in case the options - // specify a custom checkpoint to start from. This allows the custom - // checkpoint to override the stored one. - for _, opt := range opts { - if err := opt(receiver); err != nil { - return nil, err - } - } - - // update checkpoint if no checkpoint is specified and if old checkpoint is successfully read from e.g. file or memory - if receiver.checkpoint == (persist.Checkpoint{}) { - oldCheckpoint, err := receiver.getLastReceivedCheckpoint() - if err != nil { - return nil, err - } - receiver.checkpoint = oldCheckpoint - } - - if err := receiver.storeLastReceivedCheckpoint(receiver.checkpoint); err != nil { - return nil, err - } - - tab.For(ctx).Debug("creating a new receiver") - return receiver, receiver.newSessionAndLink(ctx) -} - -// Close will close the AMQP session and link of the receiver -func (r *receiver) Close(ctx context.Context) error { - span, _ := r.startConsumerSpanFromContext(ctx, "eh.receiver.Close") - defer span.End() - - if r.done != nil { - r.done() - } - - err := r.receiver.Close(ctx) - if err != nil { - tab.For(ctx).Error(err) - if sessionErr := r.session.Close(ctx); sessionErr != nil { - tab.For(ctx).Error(sessionErr) - } - - if connErr := r.connection.Close(); connErr != nil { - tab.For(ctx).Error(connErr) - } - - return err - } - - if sessionErr := r.session.Close(ctx); sessionErr != nil { - tab.For(ctx).Error(sessionErr) - - if connErr := r.connection.Close(); connErr != nil { - tab.For(ctx).Error(connErr) - } - - return sessionErr - } - - return r.connection.Close() -} - -// Recover will attempt to close the current session and link, then rebuild them -func (r *receiver) Recover(ctx context.Context) error { - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.Recover") - defer span.End() - - _ = r.connection.Close() // we expect the receiver is in an error state - return r.newSessionAndLink(ctx) -} - -// Listen start a listener for messages sent to the entity path -func (r *receiver) Listen(handler Handler) *ListenerHandle { - ctx, done := context.WithCancel(context.Background()) - r.done = done - - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.Listen") - defer span.End() - - messages := make(chan *amqp.Message) - go r.listenForMessages(ctx, messages) - go r.handleMessages(ctx, messages, handler) - - return &ListenerHandle{ - r: r, - ctx: ctx, - } -} - -func (r *receiver) handleMessages(ctx context.Context, messages chan *amqp.Message, handler Handler) { - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.handleMessages") - defer span.End() - for { - select { - case <-ctx.Done(): - return - case msg := <-messages: - r.handleMessage(ctx, msg, handler) - } - } -} - -func (r *receiver) 
handleMessage(ctx context.Context, msg *amqp.Message, handler Handler) { - const optName = "eh.Receiver.handleMessage" - - event, err := eventFromMsg(msg) - if err != nil { - tab.For(ctx).Error(err) - r.lastError = err - r.done() - } - - ctx, span := tab.StartSpanWithRemoteParent(ctx, optName, event) - defer span.End() - - id := messageID(msg) - if str, ok := id.(string); ok { - span.AddAttributes(tab.StringAttribute("eh.message_id", str)) - } - - err = handler(ctx, event) - if err != nil { - err = r.receiver.ModifyMessage(ctx, msg, &amqp.ModifyMessageOptions{ - DeliveryFailed: true, - }) - if err != nil { - tab.For(ctx).Error(err) - } - tab.For(ctx).Error(fmt.Errorf("message modified(true, false, nil): id: %v", id)) - return - } - err = r.receiver.AcceptMessage(ctx, msg) - if err != nil { - tab.For(ctx).Error(err) - } - - err = r.storeLastReceivedCheckpoint(event.GetCheckpoint()) - if err != nil { - tab.For(ctx).Error(err) - } -} - -func (r *receiver) listenForMessages(ctx context.Context, msgChan chan *amqp.Message) { - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.listenForMessages") - defer span.End() - - for { - msg, err := r.listenForMessage(ctx) - if err == nil { - select { - case msgChan <- msg: - // Sent - continue - case <-ctx.Done(): - // Context canceled before send, ignore and shut down - tab.For(ctx).Debug("context done") - return - } - } - - select { - case <-ctx.Done(): - tab.For(ctx).Debug("context done") - return - default: - var linkError *amqp.LinkError - if errors.As(err, &linkError) && linkError.RemoteErr != nil && linkError.RemoteErr.Condition == "amqp:link:stolen" { - tab.For(ctx).Debug("link has been stolen by a higher epoch") - _ = r.Close(ctx) - return - } - - _, retryErr := common.Retry(10, 10*time.Second, func() (interface{}, error) { - sp, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.listenForMessages.tryRecover") - defer sp.End() - - tab.For(ctx).Debug("recovering connection") - err := r.Recover(ctx) - if err == nil { - tab.For(ctx).Debug("recovered connection") - return nil, nil - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - return nil, common.Retryable(err.Error()) - } - }) - - if retryErr != nil { - tab.For(ctx).Debug("retried, but error was unrecoverable") - r.lastError = retryErr - _ = r.Close(ctx) - return - } - } - } -} - -func (r *receiver) listenForMessage(ctx context.Context) (*amqp.Message, error) { - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.listenForMessage") - defer span.End() - - msg, err := r.receiver.Receive(ctx, nil) - if err != nil { - tab.For(ctx).Debug(err.Error()) - return nil, err - } - - id := messageID(msg) - if str, ok := id.(string); ok { - span.AddAttributes(tab.StringAttribute("he.message_id", str)) - } - return msg, nil -} - -// newSessionAndLink will replace the session and link on the receiver -func (r *receiver) newSessionAndLink(ctx context.Context) error { - span, ctx := r.startConsumerSpanFromContext(ctx, "eh.receiver.newSessionAndLink") - defer span.End() - - connection, err := r.hub.namespace.newConnection(ctx) - if err != nil { - return err - } - r.connection = connection - - address := r.getAddress() - err = r.hub.namespace.negotiateClaim(ctx, connection, address) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - amqpSession, err := connection.NewSession(ctx, nil) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - checkpoint, err := r.getLastReceivedCheckpoint() - - if err != nil { - tab.For(ctx).Error(err) - return 
err - } - - offsetExpression := getOffsetExpression(checkpoint) - - r.session, err = newSession(amqpSession) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - opts := amqp.ReceiverOptions{ - Credit: int32(r.prefetchCount), - SettlementMode: amqp.ReceiverSettleModeFirst.Ptr(), - Filters: []amqp.LinkFilter{amqp.NewSelectorFilter(offsetExpression)}, - } - - if r.epoch != nil { - opts.Properties = map[string]any{ - epochKey: *r.epoch, - } - } - - amqpReceiver, err := amqpSession.NewReceiver(ctx, address, &opts) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - r.receiver = amqpReceiver - return nil -} - -func (r *receiver) getLastReceivedCheckpoint() (persist.Checkpoint, error) { - return r.offsetPersister().Read(r.namespaceName(), r.hubName(), r.consumerGroup, r.partitionID) -} - -func (r *receiver) storeLastReceivedCheckpoint(checkpoint persist.Checkpoint) error { - return r.offsetPersister().Write(r.namespaceName(), r.hubName(), r.consumerGroup, r.partitionID, checkpoint) -} - -func (r *receiver) getAddress() string { - return fmt.Sprintf("%s/ConsumerGroups/%s/Partitions/%s", r.hubName(), r.consumerGroup, r.partitionID) -} - -func (r *receiver) getIdentifier() string { - if r.epoch != nil { - return fmt.Sprintf("%s/ConsumerGroups/%s/Partitions/%s/epoch/%d", r.hubName(), r.consumerGroup, r.partitionID, *r.epoch) - } - return r.getAddress() -} - -func (r *receiver) getFullIdentifier() string { - return r.hub.namespace.getEntityAudience(r.getIdentifier()) -} - -func (r *receiver) namespaceName() string { - return r.hub.namespace.name -} - -func (r *receiver) hubName() string { - return r.hub.name -} - -func (r *receiver) offsetPersister() persist.CheckpointPersister { - return r.hub.offsetPersister -} - -func messageID(msg *amqp.Message) interface{} { - var id interface{} = "null" - if msg.Properties != nil { - id = msg.Properties.MessageID - } - return id -} - -// Close will close the listener -func (lc *ListenerHandle) Close(ctx context.Context) error { - return lc.r.Close(ctx) -} - -// Done will close the channel when the listener has stopped -func (lc *ListenerHandle) Done() <-chan struct{} { - return lc.ctx.Done() -} - -// Err will return the last error encountered -func (lc *ListenerHandle) Err() error { - if lc.r.lastError != nil { - return lc.r.lastError - } - return lc.ctx.Err() -} - -// getOffsetExpression calculates a selector expression based on the Offset or EnqueueTime of a Checkpoint. -func getOffsetExpression(checkpoint persist.Checkpoint) string { - if checkpoint.Offset == "" { - // time-based, non-inclusive - // ex: amqp.annotation.x-opt-enqueued-time > '165805323000' - return fmt.Sprintf(amqpAnnotationFormat, enqueuedTimeAnnotationName, "", checkpoint.EnqueueTime.UnixNano()/int64(time.Millisecond)) - } - - // offset based, non-inclusive - // ex: "amqp.annotation.x-opt-offset > '100'" - return fmt.Sprintf(amqpAnnotationFormat, offsetAnnotationName, "", checkpoint.Offset) -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/sender.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/sender.go deleted file mode 100644 index 86bbc1a2272..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/sender.go +++ /dev/null @@ -1,395 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. 
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"strconv"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/Azure/azure-amqp-common-go/v4/uuid"
-	"github.com/Azure/go-amqp"
-	"github.com/devigned/tab"
-	"github.com/jpillora/backoff"
-)
-
-const (
-	errorServerBusy amqp.ErrCond = "com.microsoft:server-busy"
-	errorTimeout    amqp.ErrCond = "com.microsoft:timeout"
-)
-
-// sender provides session and link handling for a sending entity path
-type (
-	sender struct {
-		hub          *Hub
-		connection   *amqp.Conn
-		session      *session
-		sender       atomic.Value // holds a *amqp.Sender
-		partitionID  *string
-		Name         string
-		retryOptions *senderRetryOptions
-		// cond and recovering are used to atomically implement Recover()
-		cond       *sync.Cond
-		recovering bool
-	}
-
-	// SendOption provides a way to customize a message on sending
-	SendOption func(event *Event) error
-
-	eventer interface {
-		tab.Carrier
-		toMsg() (*amqp.Message, error)
-	}
-
-	// amqpSender is the bare minimum we need from an AMQP based sender.
-	// (used for testing)
-	// Implemented by *amqp.Sender
-	amqpSender interface {
-		LinkName() string
-		Send(ctx context.Context, msg *amqp.Message, opts *amqp.SendOptions) error
-		Close(ctx context.Context) error
-	}
-
-	// getAmqpSender should return a live sender (exactly mimics the `amqpSender()` function below)
-	// (used for testing)
-	getAmqpSender func() amqpSender
-
-	senderRetryOptions struct {
-		recoveryBackoff *backoff.Backoff
-
-		// maxRetries controls how many times we try (in addition to the first attempt)
-		// 0 indicates no retries, and < 0 will cause infinite retries.
-		// Defaults to -1.
-		maxRetries int
-	}
-)
-
-func newSenderRetryOptions() *senderRetryOptions {
-	return &senderRetryOptions{
-		recoveryBackoff: &backoff.Backoff{
-			Min:    10 * time.Millisecond,
-			Max:    4 * time.Second,
-			Jitter: true,
-		},
-		maxRetries: -1, // default to infinite retries
-	}
-}
-
-// newSender creates a new Event Hubs message sender given an AMQP client and entity path
-func (h *Hub) newSender(ctx context.Context, retryOptions *senderRetryOptions) (*sender, error) {
-	span, ctx := h.startSpanFromContext(ctx, "eh.sender.newSender")
-	defer span.End()
-
-	s := &sender{
-		hub:          h,
-		partitionID:  h.senderPartitionID,
-		retryOptions: retryOptions,
-		cond:         sync.NewCond(&sync.Mutex{}),
-	}
-	tab.For(ctx).Debug(fmt.Sprintf("creating a new sender for entity path %s", s.getAddress()))
-	err := s.newSessionAndLink(ctx)
-	return s, err
-}
-
-func (s *sender) amqpSender() amqpSender {
-	// in reality, an *amqp.Sender
-	return s.sender.Load().(amqpSender)
-}
-
-// Recover will attempt to close the current connection, session and link, then rebuild them.
-func (s *sender) Recover(ctx context.Context) error {
-	return s.recoverWithExpectedLinkID(ctx, "")
-}
-
-// recoverWithExpectedLinkID attempts to recover the link as cheaply as possible.
-// - It does not recover the link if expectedLinkID is not "" and does NOT match
-// the current link ID, as this would indicate that the previous bad link has
-// already been closed and removed.
-func (s *sender) recoverWithExpectedLinkID(ctx context.Context, expectedLinkID string) error {
-	span, ctx := s.startProducerSpanFromContext(ctx, "eh.sender.Recover")
-	defer span.End()
-
-	recover := false
-
-	// acquire exclusive lock to see if this goroutine should recover
-	s.cond.L.Lock() // block 1
-
-	// if the link they started with has already been closed and removed we don't
-	// need to trigger an additional recovery.
-	if expectedLinkID != "" && s.amqpSender().LinkName() != expectedLinkID {
-		tab.For(ctx).Debug("original linkID does not match, no recovery necessary")
-	} else if !s.recovering {
-		// another goroutine isn't recovering, so this one will
-		tab.For(ctx).Debug("will recover connection")
-		s.recovering = true
-		recover = true
-	} else {
-		// wait for the recovery to finish
-		tab.For(ctx).Debug("waiting for connection to recover")
-		s.cond.Wait()
-	}
-
-	s.cond.L.Unlock()
-
-	var err error
-	if recover {
-		tab.For(ctx).Debug("recovering connection")
-		// we expect the sender, session or client is in an error state, ignore errors
-		closeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
-		defer cancel()
-		// update shared state
-		s.cond.L.Lock() // block 2
-
-		// TODO: we should be able to recover more quickly if we don't close the connection
-		// to recover (and just attempt to recreate the link). newSessionAndLink, currently,
-		// creates a new connection so we'd need to change that.
- _ = s.amqpSender().Close(closeCtx) - _ = s.session.Close(closeCtx) - _ = s.connection.Close() - err = s.newSessionAndLink(ctx) - - s.recovering = false - s.cond.L.Unlock() - // signal to waiters that recovery is complete - s.cond.Broadcast() - } - return err -} - -// Close will close the AMQP connection, session and link of the sender -func (s *sender) Close(ctx context.Context) error { - span, _ := s.startProducerSpanFromContext(ctx, "eh.sender.Close") - defer span.End() - - err := s.amqpSender().Close(ctx) - if err != nil { - tab.For(ctx).Error(err) - if sessionErr := s.session.Close(ctx); sessionErr != nil { - tab.For(ctx).Error(sessionErr) - } - - if connErr := s.connection.Close(); connErr != nil { - tab.For(ctx).Error(connErr) - } - - return err - } - - if sessionErr := s.session.Close(ctx); sessionErr != nil { - tab.For(ctx).Error(sessionErr) - - if connErr := s.connection.Close(); connErr != nil { - tab.For(ctx).Error(connErr) - } - - return sessionErr - } - - return s.connection.Close() -} - -// Send will send a message to the entity path with options -// -// This will retry sending the message if the server responds with a busy error. -func (s *sender) Send(ctx context.Context, event *Event, opts ...SendOption) error { - span, ctx := s.startProducerSpanFromContext(ctx, "eh.sender.Send") - defer span.End() - - for _, opt := range opts { - err := opt(event) - if err != nil { - return err - } - } - - if event.ID == "" { - id, err := uuid.NewV4() - if err != nil { - return err - } - event.ID = id.String() - } - - return s.trySend(ctx, event) -} - -func (s *sender) trySend(ctx context.Context, evt eventer) error { - sp, ctx := s.startProducerSpanFromContext(ctx, "eh.sender.trySend") - defer sp.End() - - if err := sp.Inject(evt); err != nil { - tab.For(ctx).Error(err) - return err - } - - msg, err := evt.toMsg() - if err != nil { - tab.For(ctx).Error(err) - return err - } - - if str, ok := msg.Properties.MessageID.(string); ok { - sp.AddAttributes(tab.StringAttribute("he.message_id", str)) - } - - // create a per goroutine copy as Duration() and Reset() modify its state - backoff := s.retryOptions.recoveryBackoff.Copy() - - recvr := func(linkID string, err error, recover bool) { - duration := backoff.Duration() - tab.For(ctx).Debug("amqp error, delaying " + strconv.FormatInt(int64(duration/time.Millisecond), 10) + " millis: " + err.Error()) - select { - case <-time.After(duration): - // ok, continue to recover - case <-ctx.Done(): - // context expired, exit - return - } - if recover { - err = s.recoverWithExpectedLinkID(ctx, linkID) - if err != nil { - tab.For(ctx).Debug("failed to recover connection") - } else { - tab.For(ctx).Debug("recovered connection") - backoff.Reset() - } - } - } - - // try as long as the context is not dead - // successful send - // don't rebuild the connection in this case, just delay and try again - return sendMessage(ctx, s.amqpSender, s.retryOptions.maxRetries, msg, recvr) -} - -func sendMessage(ctx context.Context, getAmqpSender getAmqpSender, maxRetries int, msg *amqp.Message, recoverLink func(linkID string, err error, recover bool)) error { - var lastError error - - // maxRetries >= 0 == finite retries - // maxRetries < 0 == infinite retries - for i := 0; i < maxRetries+1 || maxRetries < 0; i++ { - select { - case <-ctx.Done(): - return ctx.Err() - default: - sender := getAmqpSender() - err := sender.Send(ctx, msg, nil) - if err == nil { - return err - } - - lastError = err - - switch e := err.(type) { - case *amqp.Error: - if e.Condition == 
errorServerBusy || e.Condition == errorTimeout { - recoverLink(sender.LinkName(), err, false) - break - } else if e.Condition == amqp.ErrCondMessageSizeExceeded || e.Condition == amqp.ErrCondTransferLimitExceeded { - return e - } - recoverLink(sender.LinkName(), err, true) - case net.Error: - recoverLink(sender.LinkName(), err, true) - default: - if !isRecoverableCloseError(err) { - return err - } - - recoverLink(sender.LinkName(), err, true) - } - } - } - - return lastError -} - -func (s *sender) String() string { - return s.Name -} - -func (s *sender) getAddress() string { - if s.partitionID != nil { - return fmt.Sprintf("%s/Partitions/%s", s.hub.name, *s.partitionID) - } - return s.hub.name -} - -func (s *sender) getFullIdentifier() string { - return s.hub.namespace.getEntityAudience(s.getAddress()) -} - -// newSessionAndLink will replace the existing connection, session and link -func (s *sender) newSessionAndLink(ctx context.Context) error { - span, ctx := s.startProducerSpanFromContext(ctx, "eh.sender.newSessionAndLink") - defer span.End() - - connection, err := s.hub.namespace.newConnection(ctx) - if err != nil { - tab.For(ctx).Error(err) - return err - } - s.connection = connection - - err = s.hub.namespace.negotiateClaim(ctx, connection, s.getAddress()) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - amqpSession, err := connection.NewSession(ctx, nil) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - amqpSender, err := amqpSession.NewSender(ctx, s.getAddress(), &amqp.SenderOptions{ - SettlementMode: amqp.SenderSettleModeMixed.Ptr(), - RequestedReceiverSettleMode: amqp.ReceiverSettleModeFirst.Ptr(), - }) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - s.session, err = newSession(amqpSession) - if err != nil { - tab.For(ctx).Error(err) - return err - } - - s.sender.Store(amqpSender) - return nil -} - -// SendWithMessageID configures the message with a message ID -func SendWithMessageID(messageID string) SendOption { - return func(event *Event) error { - event.ID = messageID - return nil - } -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/session.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/session.go deleted file mode 100644 index 43e808d0355..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/session.go +++ /dev/null @@ -1,53 +0,0 @@ -package eventhub - -// MIT License -// -// Copyright (c) Microsoft Corporation. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE
-
-import (
-	"github.com/Azure/azure-amqp-common-go/v4/uuid"
-	"github.com/Azure/go-amqp"
-)
-
-type (
-	// session is a wrapper for the AMQP session with some added information to help with Event Hubs messaging
-	session struct {
-		*amqp.Session
-		SessionID string
-	}
-)
-
-// newSession is a constructor for an Event Hubs session which will pre-populate the SessionID with a new UUID
-func newSession(amqpSession *amqp.Session) (*session, error) {
-	sessionID, err := uuid.NewV4()
-	if err != nil {
-		return nil, err
-	}
-
-	return &session{
-		Session:   amqpSession,
-		SessionID: sessionID.String(),
-	}, nil
-}
-
-func (s *session) String() string {
-	return s.SessionID
-}
diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/tracing.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/tracing.go
deleted file mode 100644
index 700e6f68783..00000000000
--- a/vendor/github.com/Azure/azure-event-hubs-go/v3/tracing.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package eventhub
-
-import (
-	"context"
-	"net/http"
-	"os"
-	"strconv"
-
-	"github.com/devigned/tab"
-)
-
-func (h *Hub) startSpanFromContext(ctx context.Context, operationName string) (tab.Spanner, context.Context) {
-	ctx, span := tab.StartSpan(ctx, operationName)
-	ApplyComponentInfo(span)
-	return span, ctx
-}
-
-func (ns *namespace) startSpanFromContext(ctx context.Context, operationName string) (tab.Spanner, context.Context) {
-	ctx, span := tab.StartSpan(ctx, operationName)
-	ApplyComponentInfo(span)
-	return span, ctx
-}
-
-func (s *sender) startProducerSpanFromContext(ctx context.Context, operationName string) (tab.Spanner, context.Context) {
-	ctx, span := tab.StartSpan(ctx, operationName)
-	ApplyComponentInfo(span)
-	span.AddAttributes(
-		tab.StringAttribute("span.kind", "producer"),
-		tab.StringAttribute("message_bus.destination", s.getFullIdentifier()),
-	)
-	return span, ctx
-}
-
-func (r *receiver) startConsumerSpanFromContext(ctx context.Context, operationName string) (tab.Spanner, context.Context) {
-	ctx, span := tab.StartSpan(ctx, operationName)
-	ApplyComponentInfo(span)
-	span.AddAttributes(
-		tab.StringAttribute("span.kind", "consumer"),
-		tab.StringAttribute("message_bus.destination", r.getFullIdentifier()),
-	)
-	return span, ctx
-}
-
-func (em *entityManager) startSpanFromContext(ctx context.Context, operationName string) (tab.Spanner, context.Context) {
-	ctx, span := tab.StartSpan(ctx, operationName)
-	ApplyComponentInfo(span)
-	span.AddAttributes(tab.StringAttribute("span.kind", "client"))
-	return span, ctx
-}
-
-// ApplyComponentInfo applies eventhub library and network info to the span
-func ApplyComponentInfo(span tab.Spanner) {
-	span.AddAttributes(
-		tab.StringAttribute("component", "github.com/Azure/azure-event-hubs-go"),
-		tab.StringAttribute("version", Version))
-	applyNetworkInfo(span)
-}
-
-func applyNetworkInfo(span tab.Spanner) {
-	hostname, err := os.Hostname()
-	if err == nil {
-		span.AddAttributes(tab.StringAttribute("peer.hostname", hostname))
-	}
-}
-
-func applyRequestInfo(span tab.Spanner, req *http.Request) {
-	span.AddAttributes(
-		tab.StringAttribute("http.url", req.URL.String()),
-		tab.StringAttribute("http.method", req.Method),
-	)
-}
-
-func applyResponseInfo(span tab.Spanner, res *http.Response) {
-	if res != nil {
-		
span.AddAttributes(tab.StringAttribute("http.status_code", strconv.Itoa(res.StatusCode))) - } -} diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/version.go b/vendor/github.com/Azure/azure-event-hubs-go/v3/version.go deleted file mode 100644 index 9609d028d0d..00000000000 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/version.go +++ /dev/null @@ -1,6 +0,0 @@ -package eventhub - -const ( - // Version is the semantic version number - Version = "3.6.2" -) diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go deleted file mode 100644 index d7b866cdf95..00000000000 --- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go +++ /dev/null @@ -1,284 +0,0 @@ -package pipeline - -import ( - "context" - "github.com/mattn/go-ieproxy" - "net" - "net/http" - "os" - "time" -) - -// The Factory interface represents an object that can create its Policy object. Each HTTP request sent -// requires that this Factory create a new instance of its Policy object. -type Factory interface { - New(next Policy, po *PolicyOptions) Policy -} - -// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface. -type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc - -// New calls f(next,po). -func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy { - return f(next, po) -} - -// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process -// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned -// Response goes backward through the linked-list for additional processing. -// NOTE: Request is passed by value so changes do not change the caller's version of -// the request. However, Request has some fields that reference mutable objects (not strings). -// These references are copied; a deep copy is not performed. Specifically, this means that -// you should avoid modifying the objects referred to by these fields: URL, Header, Body, -// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response. -type Policy interface { - Do(ctx context.Context, request Request) (Response, error) -} - -// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface. -type PolicyFunc func(ctx context.Context, request Request) (Response, error) - -// Do calls f(ctx, request). -func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) { - return f(ctx, request) -} - -// Options configures a Pipeline's behavior. -type Options struct { - HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests. - Log LogOptions -} - -// LogLevel tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LogLevel uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LogLevel = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. 
-	LogPanic
-
-	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
-	LogError
-
-	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
-	LogWarning
-
-	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
-	LogInfo
-
-	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
-	LogDebug
-)
-
-// LogOptions configures the pipeline's logging mechanism & level filtering.
-type LogOptions struct {
-	Log func(level LogLevel, message string)
-
-	// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
-	// An application can return different values over its lifetime; this allows the application to dynamically
-	// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
-	// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
-	// Usually, the function will be implemented simply like this: return level <= LogWarning
-	ShouldLog func(level LogLevel) bool
-}
-
-type pipeline struct {
-	factories []Factory
-	options   Options
-}
-
-// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
-// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
-// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
-// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
-// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
-//
-// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
-// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
-// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
-// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
-type Pipeline interface {
-	Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
-}
-
-// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
-func NewPipeline(factories []Factory, o Options) Pipeline {
-	if o.HTTPSender == nil {
-		o.HTTPSender = newDefaultHTTPClientFactory()
-	}
-	if o.Log.Log == nil {
-		o.Log.Log = func(LogLevel, string) {} // No-op logger
-	}
-	return &pipeline{factories: factories, options: o}
-}
-
-// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
-// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
-// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
-// ultimately the transformed HTTP request is sent over the network.
-func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
-	response, err := p.newPolicies(methodFactory).Do(ctx, request)
-	request.close()
-	return response, err
-}
-
-func (p *pipeline) newPolicies(methodFactory Factory) Policy {
-	// The last Policy is the one that actually sends the request over the wire and gets the response.
-	// It is overridable via the Options' HTTPSender field.
-	po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
-	next := p.options.HTTPSender.New(nil, po)
-
-	// Walk over the slice of Factory objects in reverse (from wire to API)
-	markers := 0
-	for i := len(p.factories) - 1; i >= 0; i-- {
-		factory := p.factories[i]
-		if _, ok := factory.(methodFactoryMarker); ok {
-			markers++
-			if markers > 1 {
-				panic("MethodFactoryMarker can only appear once in the pipeline")
-			}
-			if methodFactory != nil {
-				// Replace MethodFactoryMarker with passed-in methodFactory
-				next = methodFactory.New(next, po)
-			}
-		} else {
-			// Use the slice's Factory to construct its Policy
-			next = factory.New(next, po)
-		}
-	}
-
-	// Each Factory has created its Policy
-	if markers == 0 && methodFactory != nil {
-		panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
-	}
-	return next // Return head of the Policy object linked-list
-}
-
-// A PolicyOptions represents optional information that can be used by a node in the
-// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
-// which passes it (if desired) to the Policy object it creates. Today, the Policy object
-// uses the options to perform logging. But, in the future, this could be used for more.
-type PolicyOptions struct {
-	pipeline *pipeline
-}
-
-// ShouldLog returns true if the specified log level should be logged.
-func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
-	if po.pipeline.options.Log.ShouldLog != nil {
-		return po.pipeline.options.Log.ShouldLog(level)
-	}
-	return false
-}
-
-// Log logs a string to the Pipeline's Logger.
-func (po *PolicyOptions) Log(level LogLevel, msg string) {
-	if !po.ShouldLog(level) {
-		return // Short circuit message formatting if we're not logging it
-	}
-
-	// We are logging it, ensure trailing newline
-	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
-		msg += "\n" // Ensure trailing newline
-	}
-	po.pipeline.options.Log.Log(level, msg)
-
-	// If logger doesn't handle fatal/panic, we'll do it here.
-	if level == LogFatal {
-		os.Exit(1)
-	} else if level == LogPanic {
-		panic(msg)
-	}
-}
-
-var pipelineHTTPClient = newDefaultHTTPClient()
-
-func newDefaultHTTPClient() *http.Client {
-	// We want the Transport to have a large connection pool
-	return &http.Client{
-		Transport: &http.Transport{
-			Proxy: ieproxy.GetProxyFunc(),
-			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
-			Dial /*Context*/ : (&net.Dialer{
-				Timeout:   30 * time.Second,
-				KeepAlive: 30 * time.Second,
-				DualStack: true,
-			}).Dial, /*Context*/
-			MaxIdleConns:           0, // No limit
-			MaxIdleConnsPerHost:    100,
-			IdleConnTimeout:        90 * time.Second,
-			TLSHandshakeTimeout:    10 * time.Second,
-			ExpectContinueTimeout:  1 * time.Second,
-			DisableKeepAlives:      false,
-			DisableCompression:     false,
-			MaxResponseHeaderBytes: 0,
-			//ResponseHeaderTimeout:  time.Duration{},
-			//ExpectContinueTimeout:  time.Duration{},
-		},
-	}
-}
-
-// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to Go's default http.Client.
-func newDefaultHTTPClientFactory() Factory {
-	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
-		return func(ctx context.Context, request Request) (Response, error) {
-			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
-			if err != nil {
-				err = NewError(err, "HTTP request failed")
-			}
-			return NewHTTPResponse(r), err
-		}
-	})
-}
-
-var mfm = methodFactoryMarker{} // Singleton
-
-// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
-// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
-// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
-func MethodFactoryMarker() Factory {
-	return mfm
-}
-
-type methodFactoryMarker struct {
-}
-
-func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
-	panic("methodFactoryMarker policy should have been replaced with a method policy")
-}
-
-// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog
-// By default no implementation is provided here, because pipeline may be used in many different
-// contexts, so the correct implementation is context-dependent
-type LogSanitizer interface {
-	SanitizeLogMessage(raw string) string
-}
-
-var sanitizer LogSanitizer
-var enableForceLog bool = true
-
-// SetLogSanitizer can be called to supply a custom LogSanitizer.
-// There is no thread safety or locking on the underlying variable,
-// so call this function just once at startup of your application
-// (Don't later try to change the sanitizer on the fly).
-func SetLogSanitizer(s LogSanitizer) {
-	sanitizer = s
-}
-
-// SetForceLogEnabled can be used to disable ForceLog
-// There is no thread safety or locking on the underlying variable,
-// so call this function just once at startup of your application
-// (Don't later try to change the setting on the fly).
-func SetForceLogEnabled(enable bool) {
-	enableForceLog = enable
-}
-
-
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
deleted file mode 100644
index e7ce4970b8b..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package pipeline
-
-
-// ForceLog should rarely be used. It forcibly logs an entry to the
-// Windows Event Log (on Windows) or to the SysLog (on Linux)
-func ForceLog(level LogLevel, msg string) {
-	if !enableForceLog {
-		return
-	}
-	if sanitizer != nil {
-		msg = sanitizer.SanitizeLogMessage(msg)
-	}
-	forceLog(level, msg)
-}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
deleted file mode 100644
index 819509a1e54..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build !windows,!nacl,!plan9
-
-package pipeline
-
-import (
-	"log"
-	"log/syslog"
-)
-
-// forceLog should rarely be used. It forcibly logs an entry to the
-// Windows Event Log (on Windows) or to the SysLog (on Linux)
-func forceLog(level LogLevel, msg string) {
-	if defaultLogger == nil {
-		return // Return fast if we failed to create the logger.
-	}
-	// We are logging it, ensure trailing newline
-	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
-		msg += "\n" // Ensure trailing newline
-	}
-	switch level {
-	case LogFatal:
-		defaultLogger.Fatal(msg)
-	case LogPanic:
-		defaultLogger.Panic(msg)
-	case LogError, LogWarning, LogInfo:
-		defaultLogger.Print(msg)
-	}
-}
-
-var defaultLogger = func() *log.Logger {
-	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
-	return l
-}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
deleted file mode 100644
index 5fcf40014aa..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package pipeline
-
-import (
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-// forceLog should rarely be used. It forcibly logs an entry to the
-// Windows Event Log (on Windows) or to the SysLog (on Linux)
-func forceLog(level LogLevel, msg string) {
-	var el eventType
-	switch level {
-	case LogError, LogFatal, LogPanic:
-		el = elError
-	case LogWarning:
-		el = elWarning
-	case LogInfo:
-		el = elInfo
-	}
-	// We are logging it, ensure trailing newline
-	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
-		msg += "\n" // Ensure trailing newline
-	}
-	reportEvent(el, 0, msg)
-}
-
-type eventType int16
-
-const (
-	elSuccess eventType = 0
-	elError   eventType = 1
-	elWarning eventType = 2
-	elInfo    eventType = 4
-)
-
-var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
-	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
-	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
-
-	sourceName, _ := os.Executable()
-	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
-	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
-	if lastErr == nil { // On error, logging is a no-op
-		return func(eventType eventType, eventID int32, msg string) {}
-	}
-	reportEvent := advAPI32.MustFindProc("ReportEventW")
-	return func(eventType eventType, eventID int32, msg string) {
-		s, _ := syscall.UTF16PtrFromString(msg)
-		_, _, _ = reportEvent.Call(
-			uintptr(handle),             // HANDLE  hEventLog
-			uintptr(eventType),          // WORD    wType
-			uintptr(0),                  // WORD    wCategory
-			uintptr(eventID),            // DWORD   dwEventID
-			uintptr(0),                  // PSID    lpUserSid
-			uintptr(1),                  // WORD    wNumStrings
-			uintptr(0),                  // DWORD   dwDataSize
-			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
-			uintptr(0))                  // LPVOID  lpRawData
-	}
-}()
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
deleted file mode 100644
index b5ab05f4dee..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-/*
-Package pipeline implements an HTTP request/response middleware pipeline whose
-policy objects mutate an HTTP request's URL, query parameters, and/or headers before
-the request is sent over the wire.
-
-Not all policy objects mutate an HTTP request; some policy objects simply impact the
-flow of requests/responses by performing operations such as logging, retry policies,
-timeouts, failure injection, and deserialization of response payloads.
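To make the middleware flow concrete before the formal walkthrough below, here is a minimal, self-contained sketch of a request-mutating policy driven through a pipeline. It assumes only the Factory, Policy, PolicyFunc, NewPipeline, NewRequest, and Do APIs defined in this package; the header name and target URL are illustrative placeholders, not part of the library:

    package main

    import (
        "context"
        "fmt"
        "net/url"

        "github.com/Azure/azure-pipeline-go/pipeline"
    )

    // headerFactory builds a Policy that stamps one fixed header on every request.
    type headerFactory struct {
        key, value string
    }

    func (f headerFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
        return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
            request.Header.Set(f.key, f.value) // mutate the outgoing request
            return next.Do(ctx, request)       // forward to the next Policy in the linked-list
        })
    }

    func main() {
        // A zero Options value means the pipeline's default http.Client sends the request.
        p := pipeline.NewPipeline(
            []pipeline.Factory{headerFactory{key: "x-example-header", value: "demo"}},
            pipeline.Options{},
        )

        u, _ := url.Parse("https://example.com/")
        req, err := pipeline.NewRequest("GET", *u, nil) // nil body: request carries no payload
        if err != nil {
            panic(err)
        }

        // nil methodFactory: no method-specific Policy is injected.
        resp, err := p.Do(context.Background(), nil, req)
        if err != nil {
            panic(err)
        }
        fmt.Println(resp.Response().Status)
    }

Note that the per-request Policy instance comes from New, so the factory itself stays immutable and goroutine-safe; that is what lets one Pipeline be shared across many concurrent requests.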
-
-Implementing the Policy Interface
-
-To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
-method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
-operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
-parameters, inject a failure, etc. Your Do method must then forward the HTTP request to the next Policy object
-in the linked-list, ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
-object sends the HTTP request over the network (by calling the HTTPSender's Do method).
-
-When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
-(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
-or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
-to the code that initiated the original HTTP request.
-
-Here is a template for how to define a pipeline.Policy object:
-
-	type myPolicy struct {
-		node PolicyNode
-		// TODO: Add configuration/setting fields here (if desired)...
-	}
-
-	func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
-		// TODO: Mutate/process the HTTP request here...
-		response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
-		// TODO: Mutate/process the HTTP response here...
-		return response, err // Return response/error to previous Policy
-	}
-
-Implementing the Factory Interface
-
-Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
-method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
-passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
-a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
-passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
-
-Here is a template for how to define a pipeline.Factory object:
-
-	// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
-	// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
-	type myPolicyFactory struct {
-		// TODO: Add any configuration/setting fields if desired...
-	}
-
-	func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
-		return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
-	}
-
-Using your Factory and Policy objects via a Pipeline
-
-To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
-this slice to the pipeline.NewPipeline function.
-
-	func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
-
-This function also requires an object implementing the HTTPSender interface. For simple scenarios,
-passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
-send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
-object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
-or other objects that can simulate the network requests for testing purposes.
-
-Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
-wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
-context.Context for cancelling the HTTP request (if desired).
-
-	type Pipeline interface {
-		Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
-	}
-
-Do iterates over the slice of Factory objects and tells each one to create its corresponding
-Policy object. After the linked-list of Policy objects has been created, Do calls the first
-Policy object passing it the Context & HTTP request parameters. These parameters now flow through
-all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
-The last Policy object sends the message over the network.
-
-When the network operation completes, the HTTP response and error return values pass
-back through the same Policy objects in reverse order. Most Policy objects ignore the
-response/error but some log the result, retry the operation (depending on the exact
-reason the operation failed), or deserialize the response's body. Your own Policy
-objects can do whatever they like when processing outgoing requests or incoming responses.
-
-Note that after an I/O request runs to completion, the Policy objects for that request
-are garbage collected. However, Pipeline objects (like Factory objects) are goroutine-safe, allowing
-them to be created once and reused over many I/O operations. This allows for efficient use of
-memory and also makes them safely usable by multiple goroutines concurrently.
-
-Inserting a Method-Specific Factory into the Linked-List of Policy Objects
-
-While Pipeline and Factory objects can be reused over many different operations, it is
-common to have special behavior for a specific operation/method. For example, a method
-may need to deserialize the response's body to an instance of a specific data type.
-To accommodate this, the Pipeline's Do method takes an additional method-specific
-Factory object. The Do method tells this Factory to create a Policy object and
-injects this method-specific Policy object into the linked-list of Policy objects.
-
-When creating a Pipeline object, the slice of Factory objects passed must have 1
-(and only 1) entry marking where the method-specific Factory should be injected.
-The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
-
-	func MethodFactoryMarker() pipeline.Factory
-
-Creating an HTTP Request Object
-
-The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
-Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
-http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
-
-	func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
-
-To this function, you must pass a pipeline.RequestOptions that looks like this:
-
-	type RequestOptions struct {
-		// The readable and seekable stream to be sent to the server as the request's body.
-		Body io.ReadSeeker
-
-		// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
-		Progress ProgressReceiver
-	}
-
-The method and struct ensure that the request's body stream is a read/seekable stream.
-A seekable stream is required so that upon retry, the final Policy object can seek
-the stream back to the beginning before retrying the network request and re-uploading the
-body. In addition, you can associate a ProgressReceiver callback function which will be
-invoked periodically to report progress while bytes are being read from the body stream
-and sent over the network.
-
-Processing the HTTP Response
-
-When an HTTP response comes in from the network, a reference to Go's http.Response struct is
-embedded in a struct that implements the pipeline.Response interface:
-
-	type Response interface {
-		Response() *http.Response
-	}
-
-This interface is returned through all the Policy objects. Each Policy object can call the Response
-interface's Response method to examine (or mutate) the embedded http.Response object.
-
-A Policy object can internally define another struct (implementing the pipeline.Response interface)
-that embeds an http.Response and adds additional fields and return this structure to other Policy
-objects. This allows a Policy object to deserialize the body to some other struct and return the
-original http.Response and the additional struct back through the Policy chain. Other Policy objects
-can see the Response but cannot see the additional struct with the deserialized body. After all the
-Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
-The caller of this method can perform a type assertion attempting to get back to the struct type
-really returned by the Policy object. If the type assertion is successful, the caller now has
-access to both the http.Response and the deserialized struct object.*/
-package pipeline
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
deleted file mode 100644
index 5d3d4339e4f..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package pipeline
-
-import (
-	"fmt"
-	"runtime"
-)
-
-type causer interface {
-	Cause() error
-}
-
-func errorWithPC(msg string, pc uintptr) string {
-	s := ""
-	if fn := runtime.FuncForPC(pc); fn != nil {
-		file, line := fn.FileLine(pc)
-		s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
-	}
-	s += msg + "\n\n"
-	return s
-}
-
-func getPC(callersToSkip int) uintptr {
-	// Get the PC of Initialize method's caller.
-	pc := [1]uintptr{}
-	_ = runtime.Callers(callersToSkip, pc[:])
-	return pc[0]
-}
-
-// ErrorNode can be an embedded field in a private error object. This field
-// adds Program Counter support and a 'cause' (reference to a preceding error).
-// When initializing an error type with this embedded field, initialize the
-// ErrorNode field by calling ErrorNode{}.Initialize(cause).
-type ErrorNode struct {
-	pc    uintptr // Represents a Program Counter that you can get symbols for.
-	cause error   // Refers to the preceding error (or nil)
-}
-
-// Error returns a string with the PC's symbols or "" if the PC is invalid.
-// When defining a new error type, have its Error method call this one passing
-// it the string representation of the error.
-func (e *ErrorNode) Error(msg string) string {
-	s := errorWithPC(msg, e.pc)
-	if e.cause != nil {
-		s += e.cause.Error() + "\n"
-	}
-	return s
-}
-
-// Cause returns the error that preceded this error.
-func (e *ErrorNode) Cause() error { return e.cause }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (e *ErrorNode) Unwrap() error { return e.cause }
-
-// Temporary returns true if the error occurred due to a temporary condition.
-func (e ErrorNode) Temporary() bool {
-	type temporary interface {
-		Temporary() bool
-	}
-
-	for err := e.cause; err != nil; {
-		if t, ok := err.(temporary); ok {
-			return t.Temporary()
-		}
-
-		if cause, ok := err.(causer); ok {
-			err = cause.Cause()
-		} else {
-			err = nil
-		}
-	}
-	return false
-}
-
-// Timeout returns true if the error occurred due to time expiring.
-func (e ErrorNode) Timeout() bool {
-	type timeout interface {
-		Timeout() bool
-	}
-
-	for err := e.cause; err != nil; {
-		if t, ok := err.(timeout); ok {
-			return t.Timeout()
-		}
-
-		if cause, ok := err.(causer); ok {
-			err = cause.Cause()
-		} else {
-			err = nil
-		}
-	}
-	return false
-}
-
-// Initialize is used to initialize an embedded ErrorNode field.
-// It captures the caller's program counter and saves the cause (preceding error).
-// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
-// value of 3 is very common; but, depending on your code nesting, you may need
-// a different value.
-func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
-	pc := getPC(callersToSkip)
-	return ErrorNode{pc: pc, cause: cause}
-}
-
-// Cause walks all the preceding errors and returns the originating error.
-func Cause(err error) error {
-	for err != nil {
-		cause, ok := err.(causer)
-		if !ok {
-			break
-		}
-		err = cause.Cause()
-	}
-	return err
-}
-
-// ErrorNodeNoCause can be an embedded field in a private error object. This field
-// adds Program Counter support.
-// When initializing an error type with this embedded field, initialize the
-// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
-type ErrorNodeNoCause struct {
-	pc uintptr // Represents a Program Counter that you can get symbols for.
-}
-
-// Error returns a string with the PC's symbols or "" if the PC is invalid.
-// When defining a new error type, have its Error method call this one passing
-// it the string representation of the error.
-func (e *ErrorNodeNoCause) Error(msg string) string {
-	return errorWithPC(msg, e.pc)
-}
-
-// Temporary returns true if the error occurred due to a temporary condition.
-func (e ErrorNodeNoCause) Temporary() bool {
-	return false
-}
-
-// Timeout returns true if the error occurred due to time expiring.
-func (e ErrorNodeNoCause) Timeout() bool {
-	return false
-}
-
-// Initialize is used to initialize an embedded ErrorNode field.
-// It captures the caller's program counter.
-// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
-// value of 3 is very common; but, depending on your code nesting, you may need
-// a different value.
-func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
-	pc := getPC(callersToSkip)
-	return ErrorNodeNoCause{pc: pc}
-}
-
-// NewError creates a simple string error (like errors.New). But, this
-// error also captures the caller's Program Counter and the preceding error (if provided).
-func NewError(cause error, msg string) error {
-	if cause != nil {
-		return &pcError{
-			ErrorNode: ErrorNode{}.Initialize(cause, 3),
-			msg:       msg,
-		}
-	}
-	return &pcErrorNoCause{
-		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
-		msg:              msg,
-	}
-}
-
-// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
-type pcError struct {
-	ErrorNode
-	msg string
-}
-
-// Error satisfies the error interface. It shows the error with Program Counter
-// symbols and calls Error on the preceding error so you can see the full error chain.
-func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
-
-// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC).
-type pcErrorNoCause struct {
-	ErrorNodeNoCause
-	msg string
-}
-
-// Error satisfies the error interface. It shows the error with Program Counter symbols.
-func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
deleted file mode 100644
index efa3c8ed06b..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package pipeline
-
-import "io"
-
-// ********** The following is common between the request body AND the response body.
-
-// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
-type ProgressReceiver func(bytesTransferred int64)
-
-// ********** The following are specific to the request body (a ReadSeekCloser)
-
-// This struct is used when sending a body to the network
-type requestBodyProgress struct {
-	requestBody io.ReadSeeker // Seeking is required to support retries
-	pr          ProgressReceiver
-}
-
-// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
-func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
-	if pr == nil {
-		panic("pr must not be nil")
-	}
-	return &requestBodyProgress{requestBody: requestBody, pr: pr}
-}
-
-// Read reads a block of data from an inner stream and reports progress
-func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
-	n, err = rbp.requestBody.Read(p)
-	if err != nil {
-		return
-	}
-	// Invokes the user's callback method to report progress
-	position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
-	if err != nil {
-		panic(err)
-	}
-	rbp.pr(position)
-	return
-}
-
-func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
-	return rbp.requestBody.Seek(offset, whence)
-}
-
-// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
-func (rbp *requestBodyProgress) Close() error {
-	if c, ok := rbp.requestBody.(io.Closer); ok {
-		return c.Close()
-	}
-	return nil
-}
-
-// ********** The following are specific to the response body (a ReadCloser)
-
-// This struct is used when receiving a body from the network
-type responseBodyProgress struct {
-	responseBody io.ReadCloser
-	pr           ProgressReceiver
-	offset       int64
-}
-
-// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
-func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser { - if pr == nil { - panic("pr must not be nil") - } - return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0} -} - -// Read reads a block of data from an inner stream and reports progress -func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) { - n, err = rbp.responseBody.Read(p) - rbp.offset += int64(n) - - // Invokes the user's callback method to report progress - rbp.pr(rbp.offset) - return -} - -func (rbp *responseBodyProgress) Close() error { - return rbp.responseBody.Close() -} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go deleted file mode 100644 index 1fbe72bd4dd..00000000000 --- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go +++ /dev/null @@ -1,147 +0,0 @@ -package pipeline - -import ( - "io" - "net/http" - "net/url" - "strconv" -) - -// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods. -type Request struct { - *http.Request -} - -// NewRequest initializes a new HTTP request object with any desired options. -func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) { - // Note: the url is passed by value so that any pipeline operations that modify it do so on a copy. - - // This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now. - request.Request = &http.Request{ - Method: method, - URL: &url, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: url.Host, - } - - if body != nil { - err = request.SetBody(body) - } - return -} - -// SetBody sets the body and content length, assumes body is not nil. -func (r Request) SetBody(body io.ReadSeeker) error { - size, err := body.Seek(0, io.SeekEnd) - if err != nil { - return err - } - - body.Seek(0, io.SeekStart) - r.ContentLength = size - r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)} - - if size != 0 { - r.Body = &retryableRequestBody{body: body} - r.GetBody = func() (io.ReadCloser, error) { - _, err := body.Seek(0, io.SeekStart) - if err != nil { - return nil, err - } - return r.Body, nil - } - } else { - // in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content - r.Body = http.NoBody - r.GetBody = func() (io.ReadCloser, error) { - return http.NoBody, nil - } - - // close the user-provided empty body - if c, ok := body.(io.Closer); ok { - c.Close() - } - } - - return nil -} - -// Copy makes a copy of an http.Request. Specifically, it makes a deep copy -// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close, -// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS, -// Cancel, Response, and ctx fields. Copy panics if any of these fields are -// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer. 
-func (r Request) Copy() Request {
-	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
-		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
-			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
-	}
-	copy := *r.Request          // Copy the request
-	urlCopy := *(r.Request.URL) // Copy the URL
-	copy.URL = &urlCopy
-	copy.Header = http.Header{} // Copy the header
-	for k, vs := range r.Header {
-		for _, value := range vs {
-			copy.Header.Add(k, value)
-		}
-	}
-	return Request{Request: &copy} // Return the copy
-}
-
-func (r Request) close() error {
-	if r.Body != nil && r.Body != http.NoBody {
-		c, ok := r.Body.(*retryableRequestBody)
-		if !ok {
-			panic("unexpected request body type (should be *retryableReadSeekerCloser)")
-		}
-		return c.realClose()
-	}
-	return nil
-}
-
-// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
-func (r Request) RewindBody() error {
-	if r.Body != nil && r.Body != http.NoBody {
-		s, ok := r.Body.(io.Seeker)
-		if !ok {
-			panic("unexpected request body type (should be io.Seeker)")
-		}
-
-		// Reset the stream back to the beginning
-		_, err := s.Seek(0, io.SeekStart)
-		return err
-	}
-	return nil
-}
-
-// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
-
-// This struct is used when sending a body to the network
-type retryableRequestBody struct {
-	body io.ReadSeeker // Seeking is required to support retries
-}
-
-// Read reads a block of data from the inner stream
-func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
-	return b.body.Read(p)
-}
-
-func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
-	return b.body.Seek(offset, whence)
-}
-
-func (b *retryableRequestBody) Close() error {
-	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
-	// The pipeline closes the request body upon success.
-	return nil
-}
-
-func (b *retryableRequestBody) realClose() error {
-	if c, ok := b.body.(io.Closer); ok {
-		return c.Close()
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
deleted file mode 100644
index f2dc164821d..00000000000
--- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package pipeline
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-	"sort"
-	"strings"
-)
-
-// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
-// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
-// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
-// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
-// to the expected struct and returns the struct to its caller.
-type Response interface {
-	Response() *http.Response
-}
-
-// This is the default struct that has the http.Response.
-// A method can replace this struct with its own struct containing an http.Response
-// field and any other additional fields.
-type httpResponse struct { - response *http.Response -} - -// NewHTTPResponse is typically called by a Policy object to return a Response object. -func NewHTTPResponse(response *http.Response) Response { - return &httpResponse{response: response} -} - -// This method satisfies the public Response interface's Response method -func (r httpResponse) Response() *http.Response { - return r.response -} - -// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are -// not nil, then these are also written into the Buffer. -func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) { - // Write the request into the buffer. - fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n") - writeHeader(b, request.Header) - if response != nil { - fmt.Fprintln(b, " --------------------------------------------------------------------------------") - fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n") - writeHeader(b, response.Header) - } - if err != nil { - fmt.Fprintln(b, " --------------------------------------------------------------------------------") - fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") - } -} - -// formatHeaders appends an HTTP request's or response's header into a Buffer. -func writeHeader(b *bytes.Buffer, header map[string][]string) { - if len(header) == 0 { - b.WriteString(" (no headers)\n") - return - } - keys := make([]string, 0, len(header)) - // Alphabetize the headers - for k := range header { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - // Redact the value of any Authorization header to prevent security information from persisting in logs - value := interface{}("REDACTED") - if !strings.EqualFold(k, "Authorization") { - value = header[k] - } - fmt.Fprintf(b, " %s: %+v\n", k, value) - } -} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go deleted file mode 100644 index 899f996b542..00000000000 --- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go +++ /dev/null @@ -1,9 +0,0 @@ -package pipeline - -const ( - // UserAgent is the string to be used in the user agent string when making requests. - UserAgent = "azure-pipeline-go/" + Version - - // Version is the semantic version (see http://semver.org) of the pipeline package. - Version = "0.2.1" -) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt deleted file mode 100644 index a338672ec53..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE.txt +++ /dev/null @@ -1,29 +0,0 @@ -NOTICES AND INFORMATION -Do Not Translate or Localize - -This software incorporates material from third parties. Microsoft makes certain -open source code available at https://3rdpartysource.microsoft.com, or you may -send a check or money order for US $5.00, including the product name, the open -source component name, and version number, to: - -Source Code Compliance Team -Microsoft Corporation -One Microsoft Way -Redmond, WA 98052 -USA - -Notwithstanding any other terms, you may reverse engineer this software to the -extent required to debug changes to any libraries licensed under the GNU Lesser -General Public License. - ------------------------------------------------------------------------------- - -Azure SDK for Go uses third-party libraries or other resources that may be -distributed under licenses different than the Azure SDK for Go software. 
- -In the event that we accidentally failed to list a required notice, please -bring it to our attention. Post an issue or email us: - - azgosdkhelp@microsoft.com - -The attached notices are provided for information only. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go new file mode 100644 index 00000000000..c373cc43fd5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package arm + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = armpolicy.ClientOptions + +// Client is a HTTP client for use with ARM endpoints. It consists of an endpoint, pipeline, and tracing provider. +type Client struct { + ep string + pl runtime.Pipeline + tr tracing.Tracer +} + +// NewClient creates a new Client instance with the provided values. +// This client is intended to be used with Azure Resource Manager endpoints. +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. +// - cred - the TokenCredential used to authenticate the request +// - options - optional client configurations; pass nil to accept the default values +func NewClient(moduleName, moduleVersion string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + + if !options.Telemetry.Disabled { + if err := shared.ValidateModVer(moduleVersion); err != nil { + return nil, err + } + } + + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, cred, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + return &Client{ep: ep, pl: pl, tr: tr}, nil +} + +// Endpoint returns the service's base URL for this client. +func (c *Client) Endpoint() string { + return c.ep +} + +// Pipeline returns the pipeline for this client. +func (c *Client) Pipeline() runtime.Pipeline { + return c.pl +} + +// Tracer returns the tracer for this client. +func (c *Client) Tracer() tracing.Tracer { + return c.tr +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go new file mode 100644 index 00000000000..1bdd16a3d03 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. 
+ +// Package arm contains functionality specific to Azure Resource Manager clients. +package arm diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go new file mode 100644 index 00000000000..d35d6374fdc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go @@ -0,0 +1,23 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package arm + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" +) + +// RootResourceID defines the tenant as the root parent of all other ResourceID. +var RootResourceID = resource.RootResourceID + +// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. +// Don't create this type directly, use ParseResourceID instead. +type ResourceID = resource.ResourceID + +// ParseResourceID parses a string to an instance of ResourceID +func ParseResourceID(id string) (*ResourceID, error) { + return resource.ParseResourceID(id) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go new file mode 100644 index 00000000000..fc7fbffd260 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package arm + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" +) + +// SubscriptionResourceType is the ResourceType of a subscription +var SubscriptionResourceType = resource.SubscriptionResourceType + +// ResourceGroupResourceType is the ResourceType of a resource group +var ResourceGroupResourceType = resource.ResourceGroupResourceType + +// TenantResourceType is the ResourceType of a tenant +var TenantResourceType = resource.TenantResourceType + +// ProviderResourceType is the ResourceType of a provider +var ProviderResourceType = resource.ProviderResourceType + +// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets". +// Don't create this type directly, use ParseResourceType or NewResourceType instead. +type ResourceType = resource.ResourceType + +// NewResourceType creates an instance of ResourceType using a provider namespace +// such as "Microsoft.Network" and type such as "virtualNetworks/subnets". +func NewResourceType(providerNamespace, typeName string) ResourceType { + return resource.NewResourceType(providerNamespace, typeName) +} + +// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subsets) +// or a resource identifier string. +// e.g. 
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet)
+func ParseResourceType(resourceIDOrType string) (ResourceType, error) {
+	return resource.ParseResourceType(resourceIDOrType)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go
index 08e8f005d2a..fd89cab620c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go
@@ -14,5 +14,5 @@ const (
 	ModuleName = "azeventgrid"
 
 	// ModuleVersion is the semantic version (see http://semver.org) of this module.
-	ModuleVersion = "v0.4.0"
+	ModuleVersion = "v0.5.0"
 )
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md
index 873680e7be8..fc77f82e2d0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md
@@ -1,5 +1,7 @@
 # Azure Event Grid Publisher Client Module for Go
 
+**Please note this package has been moved to: [azeventgrid](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/eventgrid/azeventgrid).**
+
 [Azure Event Grid](https://learn.microsoft.com/azure/event-grid/overview) is a highly scalable, fully managed Pub Sub message distribution service that offers flexible message consumption patterns. For more information about Event Grid see: [link](https://learn.microsoft.com/azure/event-grid/overview).
 
 The client in this package can publish events to [Event Grid topics](https://learn.microsoft.com/azure/event-grid/concepts).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/CHANGELOG.md
new file mode 100644
index 00000000000..35ed4b321ca
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/CHANGELOG.md
@@ -0,0 +1,177 @@
+# Release History
+
+## 1.2.1 (2024-05-20)
+
+### Bugs Fixed
+
+- Emulator strings should allow for hosts other than localhost (PR#22898)
+
+## 1.2.0 (2024-05-07)
+
+### Bugs Fixed
+
+Processor.Run had unclear behavior for some cases:
+- Run() now returns an explicit error when called more than once on a single
+  Processor instance or if multiple Run calls are made concurrently. (PR#22833)
+- NextPartitionClient now properly terminates (and returns nil) if called on a
+  stopped Processor. (PR#22833)
+
+## 1.1.0 (2024-04-02)
+
+### Features Added
+
+- Add in ability to handle emulator connection strings. (PR#22663)
+
+### Bugs Fixed
+
+- Fixed a race condition between Processor.Run() and Processor.NextPartitionClient() where cancelling Run() quickly could lead to NextPartitionClient hanging indefinitely. (PR#22541)
+
+## 1.0.4 (2024-03-05)
+
+### Bugs Fixed
+
+- Fixed case where closing a Receiver/Sender after an idle period would take > 20 seconds. (PR#22509)
+
+## 1.0.3 (2024-01-16)
+
+### Bugs Fixed
+
+- Processor now distributes partitions optimally; previously, uneven distribution could leave some processors idle and others over-assigned. (PR#22153)
+
+## 1.0.2 (2023-11-07)
+
+### Bugs Fixed
+
+- Processor now relinquishes ownership of partitions when it shuts down, making them immediately available to other active Processor instances. (PR#21899)
+
+## 1.0.1 (2023-06-06)
+
+### Bugs Fixed
+
+- GetPartitionProperties and GetEventHubProperties now retry properly on failures. (PR#20893)
+- Connection recovery could artificially fail, prolonging recovery. (PR#20883)
+
+## 1.0.0 (2023-05-09)
+
+### Features Added
+
+- First stable release of the azeventhubs package.
+- Authentication errors are indicated with an `azeventhubs.Error`, with a `Code` of `azeventhubs.ErrorCodeUnauthorizedAccess`. (PR#20450)
+
+### Bugs Fixed
+
+- Authentication errors could cause unnecessary retries, making calls take longer to fail. (PR#20450)
+- Recovery now includes internal timeouts and also handles restarting a connection if AMQP primitives aren't closed cleanly.
+- Potential leaks for $cbs and $management when there was a partial failure. (PR#20564)
+- Latest go-amqp changes have been merged in with fixes for robustness.
+- Sending a message to an entity that is full will no longer retry. (PR#20722)
+- Checkpoint store handles multiple initial owners properly, allowing only one through. (PR#20727)
+
+## 0.6.0 (2023-03-07)
+
+### Features Added
+
+- Added the `ConsumerClientOptions.InstanceID` field. This optional field can enhance error messages from
+  Event Hubs. For example, error messages related to ownership changes for a partition will contain the
+  name of the link that has taken ownership, which can help with traceability.
+
+### Breaking Changes
+
+- `ConsumerClient.ID()` renamed to `ConsumerClient.InstanceID()`.
+
+### Bugs Fixed
+
+- Recover the connection when the $cbs Receiver/Sender is not closed properly. This would cause
+  clients to return an error saying "$cbs node has already been opened." (PR#20334)
+
+## 0.5.0 (2023-02-07)
+
+### Features Added
+
+- Adds ProcessorOptions.Prefetch field, allowing configuration of Prefetch values for PartitionClients created using the Processor. (PR#19786)
+- Added new function to parse connection string into values using `ParseConnectionString` and `ConnectionStringProperties`. (PR#19855)
+
+### Breaking Changes
+
+- ProcessorOptions.OwnerLevel has been removed. The Processor uses 0 as the owner level.
+- Uses the public release of `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` package rather than using an internal copy.
+  For an example, see [example_consuming_with_checkpoints_test.go](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go).
+
+## 0.4.0 (2023-01-10)
+
+### Bugs Fixed
+
+- User-Agent was incorrectly formatted in our AMQP-based clients. (PR#19712)
+- Connection recovery has been improved, removing some unnecessary retries as well as adding a bound around
+  some operations (Close) that could potentially block recovery for a long time. (PR#19683)
+
+## 0.3.0 (2022-11-10)
+
+### Bugs Fixed
+
+- $cbs link is properly closed, even on cancellation (#19492)
+
+### Breaking Changes
+
+- ProducerClient.SendEventBatch renamed to ProducerClient.SendEventDataBatch, to align with
+  the name of the type.
+
+## 0.2.0 (2022-10-17)
+
+### Features Added
+
+- Raw AMQP message support, including full support for encoding Body (Value, Sequence and also multiple byte slices for Data). See ExampleEventDataBatch_AddEventData_rawAMQPMessages for some concrete examples. (PR#19156)
+- Prefetch is now enabled by default.
Prefetch allows the Event Hubs client to maintain a continuously full cache of events, controlled by PartitionClientOptions.Prefetch. (PR#19281) +- ConsumerClient.ID() returns a unique ID representing each instance of ConsumerClient. + +### Breaking Changes + +- EventDataBatch.NumMessages() renamed to EventDataBatch.NumEvents() +- Prefetch is now enabled by default. To disable it set PartitionClientOptions.Prefetch to -1. +- NewWebSocketConnArgs renamed to WebSocketConnParams +- Code renamed to ErrorCode, including associated constants like `ErrorCodeOwnershipLost`. +- OwnershipData, CheckpointData, and CheckpointStoreAddress have been folded into their individual structs: Ownership and Checkpoint. +- StartPosition and OwnerLevel were erroneously included in the ConsumerClientOptions struct - they've been removed. These can be + configured in the PartitionClientOptions. + +### Bugs Fixed + +- Retries now respect cancellation when they're in the "delay before next try" phase. (PR#19295) +- Fixed a potential leak which could cause us to open and leak a $cbs link connection, resulting in errors. (PR#19326) + +## 0.1.1 (2022-09-08) + +### Features Added + +- Adding in the new Processor type, which can be used to do distributed (and load balanced) consumption of events, using a + CheckpointStore. The built-in checkpoints.BlobStore uses Azure Blob Storage for persistence. A full example is + in [example_consuming_with_checkpoints_test.go](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go). + +### Breaking Changes + +- In the first beta, ConsumerClient took constructor parameter that required a partition ID, which meant you had to create + multiple ConsumerClients if you wanted to consume multiple partitions. ConsumerClient can now create multiple PartitionClient + instances (using ConsumerClient.NewPartitionClient), which allows you to share the same AMQP connection and receive from multiple + partitions simultaneously. +- Changes to EventData/ReceivedEventData: + + - ReceivedEventData now embeds EventData for fields common between the two, making it easier to change and resend. + - `ApplicationProperties` renamed to `Properties`. + - `PartitionKey` removed from `EventData`. To send events using a PartitionKey you must set it in the options + when creating the EventDataBatch: + + ```go + batch, err := producerClient.NewEventDataBatch(context.TODO(), &azeventhubs.NewEventDataBatchOptions{ + PartitionKey: to.Ptr("partition key"), + }) + ``` + +### Bugs Fixed + +- ReceivedEventData.Offset was incorrectly parsed, resulting in it always being 0. +- Added missing fields to ReceivedEventData and EventData (CorrelationID) +- PartitionKey property was not populated for messages sent via batch. + +## 0.1.0 (2022-08-11) + +- Initial preview for the new version of the Azure Event Hubs Go SDK. diff --git a/vendor/github.com/mattn/go-ieproxy/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/LICENSE.txt similarity index 86% rename from vendor/github.com/mattn/go-ieproxy/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/LICENSE.txt index 7b7c0f855af..b2f52a2bad4 100644 --- a/vendor/github.com/mattn/go-ieproxy/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/LICENSE.txt @@ -1,8 +1,6 @@ -MIT License +Copyright (c) Microsoft Corporation. 
-Copyright (c) 2014 mattn -Copyright (c) 2017 oliverpool -Copyright (c) 2019 Adele Reed +MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -14,7 +12,7 @@ furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/README.md new file mode 100644 index 00000000000..bd724a1810b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/README.md @@ -0,0 +1,133 @@ +# Azure Event Hubs Client Module for Go + +[Azure Event Hubs](https://azure.microsoft.com/services/event-hubs/) is a big data streaming platform and event ingestion service from Microsoft. For more information about Event Hubs see: [link](https://docs.microsoft.com/azure/event-hubs/event-hubs-about). + +Use the client library `github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs` in your application to: + +- Send events to an event hub. +- Consume events from an event hub. + +Key links: +- [Source code][source] +- [API Reference Documentation][godoc] +- [Product documentation](https://azure.microsoft.com/services/event-hubs/) +- [Samples][godoc_examples] + +## Getting started + +### Install the package + +Install the Azure Event Hubs client module for Go with `go get`: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs +``` + +### Prerequisites + +- Go, version 1.18 or higher +- An [Azure subscription](https://azure.microsoft.com/free/) +- An [Event Hub namespace](https://docs.microsoft.com/azure/event-hubs/). +- An Event Hub. You can create an event hub in your Event Hubs Namespace using the [Azure Portal](https://docs.microsoft.com/azure/event-hubs/event-hubs-create), or the [Azure CLI](https://docs.microsoft.com/azure/event-hubs/event-hubs-quickstart-cli). + +### Authenticate the client + +Event Hub clients are created using a TokenCredential from the [Azure Identity package][azure_identity_pkg], like [DefaultAzureCredential][default_azure_credential]. +You can also create a client using a connection string. + +#### Using a service principal + - ConsumerClient: [link](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#example-NewConsumerClient) + - ProducerClient: [link](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#example-NewProducerClient) + +#### Using a connection string + - ConsumerClient: [link](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#example-NewConsumerClientFromConnectionString) + - ProducerClient: [link](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#example-NewProducerClientFromConnectionString) + +# Key concepts + +An Event Hub [**namespace**](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#namespace) can have multiple event hubs. 
Each event hub, in turn, contains [**partitions**](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#partitions) which store events.
+
+Events are published to an event hub using an [event publisher](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#event-publishers). In this package, the event publisher is the [ProducerClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#ProducerClient).
+
+Events can be consumed from an event hub using an [event consumer](https://docs.microsoft.com/azure/event-hubs/event-hubs-features#event-consumers). In this package there are two types for consuming events:
+- The basic event consumer is the [PartitionClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#PartitionClient), created from a [ConsumerClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#ConsumerClient). This consumer is useful if you already know which partitions you want to receive from (a minimal sketch appears further below).
+- A distributed event consumer, which uses Azure Blobs for checkpointing and coordination. This is implemented in the [Processor](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#Processor). This is useful when you want the partition assignment to be dynamically chosen and balanced with other Processor instances.
+
+More information about Event Hubs features and terminology can be found here: [link](https://docs.microsoft.com/azure/event-hubs/event-hubs-features)
+
+# Examples
+
+Examples for various scenarios can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#pkg-examples) or in the example*_test.go files in our GitHub repo for [azeventhubs](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs).
+
+# Troubleshooting
+
+### Logging
+
+This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set the environment variable `AZURE_SDK_GO_LOGGING` to `all`.
+
+Use the `azcore/log` package to control log event output or to enable logs for `azeventhubs` only. For example:
+
+```go
+import (
+  "fmt"
+  azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+)
+
+// print log output to stdout
+azlog.SetListener(func(event azlog.Event, s string) {
+    fmt.Printf("[%s] %s\n", event, s)
+})
+
+// pick the set of events to log
+azlog.SetEvents(
+  azeventhubs.EventConn,
+  azeventhubs.EventAuth,
+  azeventhubs.EventProducer,
+  azeventhubs.EventConsumer,
+)
+```
+
+## Contributing
+For details on contributing to this repository, see the [contributing guide][azure_sdk_for_go_contributing].
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
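A minimal sketch of the ConsumerClient/PartitionClient flow described under Key concepts above. This is a hedged illustration, not vendored content: the namespace, event hub, consumer group, and partition ID are placeholders, `PartitionClient.ReceiveEvents` is assumed from the package's public API, and error handling is reduced to panics for brevity.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

func main() {
	// DefaultAzureCredential is one of the azidentity credentials mentioned above.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}

	// "<namespace>", "<event-hub>" and "$Default" are placeholders.
	consumer, err := azeventhubs.NewConsumerClient("<namespace>.servicebus.windows.net", "<event-hub>", "$Default", cred, nil)
	if err != nil {
		panic(err)
	}
	defer consumer.Close(context.TODO())

	// Read from a single, known partition; the Processor would manage this for you instead.
	partClient, err := consumer.NewPartitionClient("0", nil)
	if err != nil {
		panic(err)
	}
	defer partClient.Close(context.TODO())

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// ReceiveEvents blocks until the requested count arrives or the context expires.
	events, err := partClient.ReceiveEvents(ctx, 100, nil)
	if err != nil {
		panic(err)
	}

	for _, evt := range events {
		fmt.Printf("offset %d: %s\n", evt.Offset, string(evt.Body))
	}
}
```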
+ +### Additional Helpful Links for Contributors +Many people all over the world have helped make this project better. You'll want to check out: + +* [What are some good first issues for new contributors to the repo?](https://github.com/azure/azure-sdk-for-go/issues?q=is%3Aopen+is%3Aissue+label%3A%22up+for+grabs%22) +* [How to build and test your change][azure_sdk_for_go_contributing_developer_guide] +* [How you can make a change happen!][azure_sdk_for_go_contributing_pull_requests] +* Frequently Asked Questions (FAQ) and Conceptual Topics in the detailed [Azure SDK for Go wiki](https://github.com/azure/azure-sdk-for-go/wiki). + + +### Reporting security issues and security bugs + +Security issues and bugs should be reported privately, via email, to the Microsoft Security Response Center (MSRC) . You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Further information, including the MSRC PGP key, can be found in the [Security TechCenter](https://www.microsoft.com/msrc/faqs-report-an-issue). + +### License + +Azure SDK for Go is licensed under the [MIT](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/LICENSE.txt) license. + + +[azure_sdk_for_go_contributing]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[azure_sdk_for_go_contributing_developer_guide]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md#developer-guide +[azure_sdk_for_go_contributing_pull_requests]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md#pull-requests + +[azure_identity_pkg]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[default_azure_credential]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#NewDefaultAzureCredential +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/messaging/azeventhubs +[godoc]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs +[godoc_examples]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#pkg-examples + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fmessaging%2Fazeventhubs%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/amqp_message.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/amqp_message.go new file mode 100644 index 00000000000..2e0bc54045f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/amqp_message.go @@ -0,0 +1,271 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azeventhubs + +import ( + "time" + + "github.com/Azure/go-amqp" +) + +// AMQPAnnotatedMessage represents the AMQP message, as received from Event Hubs. +// For details about these properties, refer to the AMQP specification: +// +// https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#section-message-format +// +// Some fields in this struct are typed 'any', which means they will accept AMQP primitives, or in some +// cases slices and maps. +// +// AMQP simple types include: +// - int (any size), uint (any size) +// - float (any size) +// - string +// - bool +// - time.Time +type AMQPAnnotatedMessage struct { + // ApplicationProperties corresponds to the "application-properties" section of an AMQP message. 
+	//
+	// The values of the map are restricted to AMQP simple types, as listed in the comment for AMQPAnnotatedMessage.
+	ApplicationProperties map[string]any
+
+	// Body represents the body of an AMQP message.
+	Body AMQPAnnotatedMessageBody
+
+	// DeliveryAnnotations corresponds to the "delivery-annotations" section in an AMQP message.
+	//
+	// The values of the map are restricted to AMQP simple types, as listed in the comment for AMQPAnnotatedMessage.
+	DeliveryAnnotations map[any]any
+
+	// DeliveryTag corresponds to the delivery-tag property of the TRANSFER frame
+	// for this message.
+	DeliveryTag []byte
+
+	// Footer is the transport footers for this AMQP message.
+	//
+	// The values of the map are restricted to AMQP simple types, as listed in the comment for AMQPAnnotatedMessage.
+	Footer map[any]any
+
+	// Header is the transport headers for this AMQP message.
+	Header *AMQPAnnotatedMessageHeader
+
+	// MessageAnnotations corresponds to the message-annotations section of an AMQP message.
+	//
+	// The values of the map are restricted to AMQP simple types, as listed in the comment for AMQPAnnotatedMessage.
+	MessageAnnotations map[any]any
+
+	// Properties corresponds to the properties section of an AMQP message.
+	Properties *AMQPAnnotatedMessageProperties
+}
+
+// AMQPAnnotatedMessageProperties represents the properties of an AMQP message.
+// See here for more details:
+// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-properties
+type AMQPAnnotatedMessageProperties struct {
+	// AbsoluteExpiryTime corresponds to the 'absolute-expiry-time' property.
+	AbsoluteExpiryTime *time.Time
+
+	// ContentEncoding corresponds to the 'content-encoding' property.
+	ContentEncoding *string
+
+	// ContentType corresponds to the 'content-type' property
+	ContentType *string
+
+	// CorrelationID corresponds to the 'correlation-id' property.
+	// The type of CorrelationID can be a uint64, UUID, []byte, or a string
+	CorrelationID any
+
+	// CreationTime corresponds to the 'creation-time' property.
+	CreationTime *time.Time
+
+	// GroupID corresponds to the 'group-id' property.
+	GroupID *string
+
+	// GroupSequence corresponds to the 'group-sequence' property.
+	GroupSequence *uint32
+
+	// MessageID corresponds to the 'message-id' property.
+	// The type of MessageID can be a uint64, UUID, []byte, or string
+	MessageID any
+
+	// ReplyTo corresponds to the 'reply-to' property.
+	ReplyTo *string
+
+	// ReplyToGroupID corresponds to the 'reply-to-group-id' property.
+	ReplyToGroupID *string
+
+	// Subject corresponds to the 'subject' property.
+	Subject *string
+
+	// To corresponds to the 'to' property.
+	To *string
+
+	// UserID corresponds to the 'user-id' property.
+	UserID []byte
+}
+
+// AMQPAnnotatedMessageBody represents the body of an AMQP message.
+// Only one of these fields can be used at a time. They are mutually exclusive.
+type AMQPAnnotatedMessageBody struct {
+	// Data is encoded/decoded as multiple data sections in the body.
+	Data [][]byte
+
+	// Sequence is encoded/decoded as one or more amqp-sequence sections in the body.
+	//
+	// The values of the slices are restricted to AMQP simple types, as listed in the comment for AMQPAnnotatedMessage.
+	Sequence [][]any
+
+	// Value is encoded/decoded as the amqp-value section in the body.
+	//
+	// The type of Value can be any of the AMQP simple types, as listed in the comment for AMQPAnnotatedMessage,
+	// as well as slices or maps of AMQP simple types.
+	Value any
+}
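A short sketch of choosing each of the mutually exclusive Body sections declared above, using the Data, Sequence, and Value fields exactly as the struct defines them. The values are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

func main() {
	// Each AMQPAnnotatedMessageBody picks exactly one of the mutually
	// exclusive sections: Value, Sequence, or Data.
	bodies := []azeventhubs.AMQPAnnotatedMessageBody{
		{Value: map[string]any{"temperature": 21.5, "unit": "C"}},     // amqp-value section
		{Sequence: [][]any{{int64(1), "first"}, {int64(2), "second"}}}, // amqp-sequence sections
		{Data: [][]byte{[]byte("raw payload")}},                        // data sections
	}

	for _, b := range bodies {
		msg := azeventhubs.AMQPAnnotatedMessage{Body: b}
		fmt.Printf("data=%d sequence=%d hasValue=%v\n",
			len(msg.Body.Data), len(msg.Body.Sequence), msg.Body.Value != nil)
	}
}
```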
+
+// AMQPAnnotatedMessageHeader carries standard delivery details about the transfer
+// of a message.
+// See https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-header
+// for more details.
+type AMQPAnnotatedMessageHeader struct {
+	// DeliveryCount is the number of unsuccessful previous attempts to deliver this message.
+	// It corresponds to the 'delivery-count' property.
+	DeliveryCount uint32
+
+	// Durable corresponds to the 'durable' property.
+	Durable bool
+
+	// FirstAcquirer corresponds to the 'first-acquirer' property.
+	FirstAcquirer bool
+
+	// Priority corresponds to the 'priority' property.
+	Priority uint8
+
+	// TTL corresponds to the 'ttl' property.
+	TTL time.Duration
+}
+
+// toAMQPMessage converts our (azeventhubs) AMQP message
+// to the underlying message used by go-amqp.
+func (am *AMQPAnnotatedMessage) toAMQPMessage() *amqp.Message {
+	var header *amqp.MessageHeader
+
+	if am.Header != nil {
+		header = &amqp.MessageHeader{
+			DeliveryCount: am.Header.DeliveryCount,
+			Durable:       am.Header.Durable,
+			FirstAcquirer: am.Header.FirstAcquirer,
+			Priority:      am.Header.Priority,
+			TTL:           am.Header.TTL,
+		}
+	}
+
+	var properties *amqp.MessageProperties
+
+	if am.Properties != nil {
+		properties = &amqp.MessageProperties{
+			AbsoluteExpiryTime: am.Properties.AbsoluteExpiryTime,
+			ContentEncoding:    am.Properties.ContentEncoding,
+			ContentType:        am.Properties.ContentType,
+			CorrelationID:      am.Properties.CorrelationID,
+			CreationTime:       am.Properties.CreationTime,
+			GroupID:            am.Properties.GroupID,
+			GroupSequence:      am.Properties.GroupSequence,
+			MessageID:          am.Properties.MessageID,
+			ReplyTo:            am.Properties.ReplyTo,
+			ReplyToGroupID:     am.Properties.ReplyToGroupID,
+			Subject:            am.Properties.Subject,
+			To:                 am.Properties.To,
+			UserID:             am.Properties.UserID,
+		}
+	} else {
+		properties = &amqp.MessageProperties{}
+	}
+
+	var footer amqp.Annotations
+
+	if am.Footer != nil {
+		footer = (amqp.Annotations)(am.Footer)
+	}
+
+	return &amqp.Message{
+		Annotations:           copyAnnotations(am.MessageAnnotations),
+		ApplicationProperties: am.ApplicationProperties,
+		Data:                  am.Body.Data,
+		DeliveryAnnotations:   amqp.Annotations(am.DeliveryAnnotations),
+		DeliveryTag:           am.DeliveryTag,
+		Footer:                footer,
+		Header:                header,
+		Properties:            properties,
+		Sequence:              am.Body.Sequence,
+		Value:                 am.Body.Value,
+	}
+}
+
+func copyAnnotations(src map[any]any) amqp.Annotations {
+	if src == nil {
+		return amqp.Annotations{}
+	}
+
+	dest := amqp.Annotations{}
+
+	for k, v := range src {
+		dest[k] = v
+	}
+
+	return dest
+}
+
+func newAMQPAnnotatedMessage(goAMQPMessage *amqp.Message) *AMQPAnnotatedMessage {
+	var header *AMQPAnnotatedMessageHeader
+
+	if goAMQPMessage.Header != nil {
+		header = &AMQPAnnotatedMessageHeader{
+			DeliveryCount: goAMQPMessage.Header.DeliveryCount,
+			Durable:       goAMQPMessage.Header.Durable,
+			FirstAcquirer: goAMQPMessage.Header.FirstAcquirer,
+			Priority:      goAMQPMessage.Header.Priority,
+			TTL:           goAMQPMessage.Header.TTL,
+		}
+	}
+
+	var properties *AMQPAnnotatedMessageProperties
+
+	if goAMQPMessage.Properties != nil {
+		properties = &AMQPAnnotatedMessageProperties{
+			AbsoluteExpiryTime: goAMQPMessage.Properties.AbsoluteExpiryTime,
+			ContentEncoding:    goAMQPMessage.Properties.ContentEncoding,
+			ContentType:        goAMQPMessage.Properties.ContentType,
+			CorrelationID:      goAMQPMessage.Properties.CorrelationID,
+			CreationTime:       goAMQPMessage.Properties.CreationTime,
+			GroupID:            goAMQPMessage.Properties.GroupID,
+			GroupSequence:      goAMQPMessage.Properties.GroupSequence,
MessageID:          goAMQPMessage.Properties.MessageID,
+			ReplyTo:            goAMQPMessage.Properties.ReplyTo,
+			ReplyToGroupID:     goAMQPMessage.Properties.ReplyToGroupID,
+			Subject:            goAMQPMessage.Properties.Subject,
+			To:                 goAMQPMessage.Properties.To,
+			UserID:             goAMQPMessage.Properties.UserID,
+		}
+	}
+
+	var footer map[any]any
+
+	if goAMQPMessage.Footer != nil {
+		footer = (map[any]any)(goAMQPMessage.Footer)
+	}
+
+	return &AMQPAnnotatedMessage{
+		MessageAnnotations:    map[any]any(goAMQPMessage.Annotations),
+		ApplicationProperties: goAMQPMessage.ApplicationProperties,
+		Body: AMQPAnnotatedMessageBody{
+			Data:     goAMQPMessage.Data,
+			Sequence: goAMQPMessage.Sequence,
+			Value:    goAMQPMessage.Value,
+		},
+		DeliveryAnnotations: map[any]any(goAMQPMessage.DeliveryAnnotations),
+		DeliveryTag:         goAMQPMessage.DeliveryTag,
+		Footer:              footer,
+		Header:              header,
+		Properties:          properties,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/checkpoint_store.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/checkpoint_store.go
new file mode 100644
index 00000000000..83c1c3e54fa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/checkpoint_store.go
@@ -0,0 +1,70 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import (
+	"context"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
+
+// CheckpointStore is used by multiple consumers to coordinate progress and ownership for partitions.
+type CheckpointStore interface {
+	// ClaimOwnership attempts to claim ownership of the partitions in partitionOwnership and returns
+	// the actual partitions that were claimed.
+	ClaimOwnership(ctx context.Context, partitionOwnership []Ownership, options *ClaimOwnershipOptions) ([]Ownership, error)
+
+	// ListCheckpoints lists all the available checkpoints.
+	ListCheckpoints(ctx context.Context, fullyQualifiedNamespace string, eventHubName string, consumerGroup string, options *ListCheckpointsOptions) ([]Checkpoint, error)
+
+	// ListOwnership lists all ownerships.
+	ListOwnership(ctx context.Context, fullyQualifiedNamespace string, eventHubName string, consumerGroup string, options *ListOwnershipOptions) ([]Ownership, error)
+
+	// SetCheckpoint updates a specific checkpoint with a sequence and offset.
+	SetCheckpoint(ctx context.Context, checkpoint Checkpoint, options *SetCheckpointOptions) error
+}
+
+// Ownership tracks which consumer owns a particular partition.
+type Ownership struct {
+	ConsumerGroup           string
+	EventHubName            string
+	FullyQualifiedNamespace string
+	PartitionID             string
+
+	OwnerID          string       // the owner ID of the Processor
+	LastModifiedTime time.Time    // used when calculating if ownership has expired
+	ETag             *azcore.ETag // the ETag, used when attempting to claim or update ownership of a partition.
+}
+
+// Checkpoint tracks the last successfully processed event in a partition.
+type Checkpoint struct {
+	ConsumerGroup           string
+	EventHubName            string
+	FullyQualifiedNamespace string
+	PartitionID             string
+
+	Offset         *int64 // the last successfully processed Offset.
+	SequenceNumber *int64 // the last successfully processed SequenceNumber.
+}
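A minimal sketch of recording progress through the CheckpointStore interface above. The store can be any implementation (for example, the Azure Blob-backed store referenced in the CHANGELOG earlier in this patch); the namespace, hub, group, and partition identifiers below are placeholders.

```go
package checkpointexample

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

// saveProgress records the last processed event for one partition using any
// CheckpointStore implementation. All identifiers below are placeholders.
func saveProgress(ctx context.Context, store azeventhubs.CheckpointStore, offset, seq int64) error {
	return store.SetCheckpoint(ctx, azeventhubs.Checkpoint{
		ConsumerGroup:           "$Default",
		EventHubName:            "<event-hub>",
		FullyQualifiedNamespace: "<namespace>.servicebus.windows.net",
		PartitionID:             "0",
		Offset:                  to.Ptr(offset),
		SequenceNumber:          to.Ptr(seq),
	}, nil)
}
```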
+
+// ListCheckpointsOptions contains optional parameters for the ListCheckpoints function
+type ListCheckpointsOptions struct {
+	// For future expansion
+}
+
+// ListOwnershipOptions contains optional parameters for the ListOwnership function
+type ListOwnershipOptions struct {
+	// For future expansion
+}
+
+// SetCheckpointOptions contains optional parameters for the SetCheckpoint function
+type SetCheckpointOptions struct {
+	// For future expansion
+}
+
+// ClaimOwnershipOptions contains optional parameters for the ClaimOwnership function
+type ClaimOwnershipOptions struct {
+	// For future expansion
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/ci.yml
new file mode 100644
index 00000000000..ab79ba00916
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/ci.yml
@@ -0,0 +1,35 @@
+# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
+trigger:
+  branches:
+    include:
+      - main
+      - feature/*
+      - hotfix/*
+      - release/*
+  paths:
+    include:
+      - sdk/messaging/azeventhubs
+
+pr:
+  branches:
+    include:
+      - main
+      - feature/*
+      - hotfix/*
+      - release/*
+  paths:
+    include:
+      - sdk/messaging/azeventhubs
+
+extends:
+  template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+  parameters:
+    ServiceDirectory: 'messaging/azeventhubs'
+    RunLiveTests: true
+    SupportedClouds: 'Public,UsGov,China'
+    EnvVars:
+      AZURE_CLIENT_ID: $(AZEVENTHUBS_CLIENT_ID)
+      AZURE_TENANT_ID: $(AZEVENTHUBS_TENANT_ID)
+      AZURE_CLIENT_SECRET: $(AZEVENTHUBS_CLIENT_SECRET)
+      AZURE_SUBSCRIPTION_ID: $(AZEVENTHUBS_SUBSCRIPTION_ID)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/connection_string_properties.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/connection_string_properties.go
new file mode 100644
index 00000000000..d28e837e237
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/connection_string_properties.go
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported"
+
+// ConnectionStringProperties are the properties of a connection string
+// as returned by [ParseConnectionString].
+type ConnectionStringProperties = exported.ConnectionStringProperties
+
+// ParseConnectionString takes a connection string from the Azure portal and returns the
+// parsed representation.
+//
+// There are two supported formats:
+//  1. Connection strings generated from the portal (or elsewhere) that contain an embedded key and keyname.
+//  2. A connection string with an embedded SharedAccessSignature:
+//     Endpoint=sb://.servicebus.windows.net;SharedAccessSignature=SharedAccessSignature sr=.servicebus.windows.net&sig=&se=&skn="
+func ParseConnectionString(connStr string) (ConnectionStringProperties, error) {
+	return exported.ParseConnectionString(connStr)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/consumer_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/consumer_client.go
new file mode 100644
index 00000000000..84716b4d012
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/consumer_client.go
@@ -0,0 +1,262 @@
+// Copyright (c) Microsoft Corporation.
All rights reserved. +// Licensed under the MIT License. + +package azeventhubs + +import ( + "context" + "crypto/tls" + "fmt" + "net" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" +) + +// ConsumerClientOptions configures optional parameters for a ConsumerClient. +type ConsumerClientOptions struct { + // ApplicationID is used as the identifier when setting the User-Agent property. + ApplicationID string + + // InstanceID is a unique name used to identify the consumer. This can help with + // diagnostics as this name will be returned in error messages. By default, + // an identifier will be automatically generated. + InstanceID string + + // NewWebSocketConn is a function that can create a net.Conn for use with websockets. + // For an example, see ExampleNewClient_usingWebsockets() function in example_client_test.go. + NewWebSocketConn func(ctx context.Context, args WebSocketConnParams) (net.Conn, error) + + // RetryOptions controls how often operations are retried from this client and any + // Receivers and Senders created from this client. + RetryOptions RetryOptions + + // TLSConfig configures a client with a custom *tls.Config. + TLSConfig *tls.Config +} + +// ConsumerClient can create PartitionClient instances, which can read events from +// a partition. +type ConsumerClient struct { + consumerGroup string + eventHub string + + // instanceID is a customer supplied instanceID that can be passed to Event Hubs. + // It'll be returned in error messages and can be useful for customers when + // troubleshooting. + instanceID string + + links *internal.Links[amqpwrap.RPCLink] + namespace *internal.Namespace + retryOptions RetryOptions +} + +// NewConsumerClient creates a ConsumerClient which uses an azcore.TokenCredential for authentication. You +// MUST call [ConsumerClient.Close] on this client to avoid leaking resources. +// +// The fullyQualifiedNamespace is the Event Hubs namespace name (ex: myeventhub.servicebus.windows.net) +// The credential is one of the credentials in the [azidentity] package. +// +// [azidentity]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity +func NewConsumerClient(fullyQualifiedNamespace string, eventHub string, consumerGroup string, credential azcore.TokenCredential, options *ConsumerClientOptions) (*ConsumerClient, error) { + return newConsumerClient(consumerClientArgs{ + consumerGroup: consumerGroup, + fullyQualifiedNamespace: fullyQualifiedNamespace, + eventHub: eventHub, + credential: credential, + }, options) +} + +// NewConsumerClientFromConnectionString creates a ConsumerClient from a connection string. You +// MUST call [ConsumerClient.Close] on this client to avoid leaking resources. +// +// connectionString can be one of two formats - with or without an EntityPath key. +// +// When the connection string does not have an entity path, as shown below, the eventHub parameter cannot +// be empty and should contain the name of your event hub. +// +// Endpoint=sb://.servicebus.windows.net/;SharedAccessKeyName=;SharedAccessKey= +// +// When the connection string DOES have an entity path, as shown below, the eventHub parameter must be empty. 
+// +// Endpoint=sb://.servicebus.windows.net/;SharedAccessKeyName=;SharedAccessKey=;EntityPath=; +func NewConsumerClientFromConnectionString(connectionString string, eventHub string, consumerGroup string, options *ConsumerClientOptions) (*ConsumerClient, error) { + props, err := parseConn(connectionString, eventHub) + + if err != nil { + return nil, err + } + + return newConsumerClient(consumerClientArgs{ + consumerGroup: consumerGroup, + connectionString: connectionString, + eventHub: *props.EntityPath, + }, options) +} + +// PartitionClientOptions provides options for the NewPartitionClient function. +type PartitionClientOptions struct { + // StartPosition is the position we will start receiving events from, + // either an offset (inclusive) with Offset, or receiving events received + // after a specific time using EnqueuedTime. + // + // NOTE: you can also use the [Processor], which will automatically manage the start + // value using a [CheckpointStore]. See [example_consuming_with_checkpoints_test.go] for an + // example. + // + // [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go + StartPosition StartPosition + + // OwnerLevel is the priority for this partition client, also known as the 'epoch' level. + // When used, a partition client with a higher OwnerLevel will take ownership of a partition + // from partition clients with a lower OwnerLevel. + // Default is off. + OwnerLevel *int64 + + // Prefetch represents the size of the internal prefetch buffer. When set, + // this client will attempt to always maintain an internal cache of events of + // this size, asynchronously, increasing the odds that ReceiveEvents() will use + // a locally stored cache of events, rather than having to wait for events to + // arrive from the network. + // + // Defaults to 300 events if Prefetch == 0. + // Disabled if Prefetch < 0. + Prefetch int32 +} + +// NewPartitionClient creates a client that can receive events from a partition. By default it starts +// at the latest point in the partition. This can be changed using the options parameter. +// You MUST call [azeventhubs.PartitionClient.Close] on the returned client to avoid leaking resources. +func (cc *ConsumerClient) NewPartitionClient(partitionID string, options *PartitionClientOptions) (*PartitionClient, error) { + return newPartitionClient(partitionClientArgs{ + namespace: cc.namespace, + eventHub: cc.eventHub, + partitionID: partitionID, + instanceID: cc.instanceID, + consumerGroup: cc.consumerGroup, + retryOptions: cc.retryOptions, + }, options) +} + +// GetEventHubProperties gets event hub properties, like the available partition IDs and when the Event Hub was created. +func (cc *ConsumerClient) GetEventHubProperties(ctx context.Context, options *GetEventHubPropertiesOptions) (EventHubProperties, error) { + return getEventHubProperties(ctx, EventConsumer, cc.namespace, cc.links, cc.eventHub, cc.retryOptions, options) +} + +// GetPartitionProperties gets properties for a specific partition. This includes data like the +// last enqueued sequence number, the first sequence number and when an event was last enqueued +// to the partition. 
+func (cc *ConsumerClient) GetPartitionProperties(ctx context.Context, partitionID string, options *GetPartitionPropertiesOptions) (PartitionProperties, error) { + return getPartitionProperties(ctx, EventConsumer, cc.namespace, cc.links, cc.eventHub, partitionID, cc.retryOptions, options) +} + +// InstanceID is the identifier for this ConsumerClient. +func (cc *ConsumerClient) InstanceID() string { + return cc.instanceID +} + +type consumerClientDetails struct { + FullyQualifiedNamespace string + ConsumerGroup string + EventHubName string + ClientID string +} + +func (cc *ConsumerClient) getDetails() consumerClientDetails { + return consumerClientDetails{ + FullyQualifiedNamespace: cc.namespace.FQDN, + ConsumerGroup: cc.consumerGroup, + EventHubName: cc.eventHub, + ClientID: cc.InstanceID(), + } +} + +// Close releases resources for this client. +func (cc *ConsumerClient) Close(ctx context.Context) error { + return cc.namespace.Close(ctx, true) +} + +type consumerClientArgs struct { + connectionString string + + // the Event Hubs namespace name (ex: myservicebus.servicebus.windows.net) + fullyQualifiedNamespace string + credential azcore.TokenCredential + + consumerGroup string + eventHub string +} + +func newConsumerClient(args consumerClientArgs, options *ConsumerClientOptions) (*ConsumerClient, error) { + if options == nil { + options = &ConsumerClientOptions{} + } + + instanceID, err := getInstanceID(options.InstanceID) + + if err != nil { + return nil, err + } + + client := &ConsumerClient{ + consumerGroup: args.consumerGroup, + eventHub: args.eventHub, + instanceID: instanceID, + } + + var nsOptions []internal.NamespaceOption + + if args.connectionString != "" { + nsOptions = append(nsOptions, internal.NamespaceWithConnectionString(args.connectionString)) + } else if args.credential != nil { + option := internal.NamespaceWithTokenCredential( + args.fullyQualifiedNamespace, + args.credential) + + nsOptions = append(nsOptions, option) + } + + client.retryOptions = options.RetryOptions + + if options.TLSConfig != nil { + nsOptions = append(nsOptions, internal.NamespaceWithTLSConfig(options.TLSConfig)) + } + + if options.NewWebSocketConn != nil { + nsOptions = append(nsOptions, internal.NamespaceWithWebSocket(options.NewWebSocketConn)) + } + + if options.ApplicationID != "" { + nsOptions = append(nsOptions, internal.NamespaceWithUserAgent(options.ApplicationID)) + } + + nsOptions = append(nsOptions, internal.NamespaceWithRetryOptions(options.RetryOptions)) + + tempNS, err := internal.NewNamespace(nsOptions...) + + if err != nil { + return nil, err + } + + client.namespace = tempNS + client.links = internal.NewLinks[amqpwrap.RPCLink](tempNS, fmt.Sprintf("%s/$management", client.eventHub), nil, nil) + + return client, nil +} + +func getInstanceID(optionalID string) (string, error) { + if optionalID != "" { + return optionalID, nil + } + + // generate a new one + id, err := uuid.New() + + if err != nil { + return "", err + } + + return id.String(), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/doc.go new file mode 100644 index 00000000000..25375f6dc9c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/doc.go @@ -0,0 +1,15 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+// Package azeventhubs provides clients for sending events and consuming events.
+//
+// For sending events, use the [ProducerClient].
+//
+// There are two clients for consuming events:
+// - [Processor], which handles checkpointing and load balancing using durable storage.
+// - [ConsumerClient], which is fully manual, but provides full control.
+
+package azeventhubs
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/error.go
new file mode 100644
index 00000000000..39a7eaa016d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/error.go
@@ -0,0 +1,31 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported"
+
+// Error represents an Event Hub specific error.
+// NOTE: the Code is considered part of the published API but the message that
+// comes back from Error(), as well as the underlying wrapped error, are NOT and
+// are subject to change.
+type Error = exported.Error
+
+// ErrorCode is an error code, usable by consuming code to work with
+// errors programmatically.
+type ErrorCode = exported.ErrorCode
+
+const (
+	// ErrorCodeUnauthorizedAccess means the credentials provided are not valid for use with
+	// a particular entity, or have expired.
+	ErrorCodeUnauthorizedAccess ErrorCode = exported.ErrorCodeUnauthorizedAccess
+
+	// ErrorCodeConnectionLost means our connection was lost and all retry attempts failed.
+	// This typically reflects an extended outage or connection disruption and may
+	// require manual intervention.
+	ErrorCodeConnectionLost ErrorCode = exported.ErrorCodeConnectionLost
+
+	// ErrorCodeOwnershipLost means that a partition that you were reading from was opened
+	// by another link with a higher epoch/owner level.
+	ErrorCodeOwnershipLost ErrorCode = exported.ErrorCodeOwnershipLost
+)
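Since Error and ErrorCode above are aliases of the exported types, callers typically inspect failures with errors.As. A hedged sketch, assuming the error surfaces as an *azeventhubs.Error with a public Code field:

```go
package errorexample

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs"
)

// isOwnershipLost reports whether err indicates that another consumer with a
// higher owner level took over the partition (ErrorCodeOwnershipLost above).
// This assumes the exported error type is returned as *azeventhubs.Error with
// a public Code field, which matches my reading of the aliases in error.go.
func isOwnershipLost(err error) bool {
	var ehErr *azeventhubs.Error
	return errors.As(err, &ehErr) && ehErr.Code == azeventhubs.ErrorCodeOwnershipLost
}
```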
+	//
+	// If enabled, the duplicate detection feature identifies and removes further submissions
+	// of messages with the same MessageId.
+	MessageID *string
+}
+
+// ReceivedEventData is an event that has been received using the ConsumerClient.
+type ReceivedEventData struct {
+	EventData
+
+	// EnqueuedTime is the UTC time when the message was accepted and stored by Event Hubs.
+	EnqueuedTime *time.Time
+
+	// PartitionKey is used with a partitioned entity and enables assigning related messages
+	// to the same internal partition. This ensures that the submission sequence order is correctly
+	// recorded. The partition is chosen by a hash function in Event Hubs and cannot be chosen
+	// directly.
+	PartitionKey *string
+
+	// Offset is the offset of the event.
+	Offset int64
+
+	// RawAMQPMessage is the AMQP message, as received by the client. This can be useful to get access
+	// to properties that are not exposed by ReceivedEventData such as payloads encoded into the
+	// Value or Sequence section, payloads sent as multiple Data sections, as well as Footer
+	// and Header fields.
+	RawAMQPMessage *AMQPAnnotatedMessage
+
+	// SequenceNumber is a unique number assigned to a message by Event Hubs.
+	SequenceNumber int64
+
+	// Properties set by the Event Hubs service.
+	SystemProperties map[string]any
+}
+
+// Event Hubs custom properties
+const (
+	// Annotation properties
+	partitionKeyAnnotation   = "x-opt-partition-key"
+	sequenceNumberAnnotation = "x-opt-sequence-number"
+	offsetNumberAnnotation   = "x-opt-offset"
+	enqueuedTimeAnnotation   = "x-opt-enqueued-time"
+)
+
+func (e *EventData) toAMQPMessage() *amqp.Message {
+	amqpMsg := amqp.NewMessage(e.Body)
+
+	var messageID any
+
+	if e.MessageID != nil {
+		messageID = *e.MessageID
+	}
+
+	amqpMsg.Properties = &amqp.MessageProperties{
+		MessageID: messageID,
+	}
+
+	amqpMsg.Properties.ContentType = e.ContentType
+	amqpMsg.Properties.CorrelationID = e.CorrelationID
+
+	if len(e.Properties) > 0 {
+		amqpMsg.ApplicationProperties = make(map[string]any)
+		for key, value := range e.Properties {
+			amqpMsg.ApplicationProperties[key] = value
+		}
+	}
+
+	return amqpMsg
+}
+
+// newReceivedEventData creates a received message from an AMQP message.
+// NOTE: this converter assumes that the Body of this message will be the first
+// serialized byte array in the Data section of the message.
+func newReceivedEventData(amqpMsg *amqp.Message) (*ReceivedEventData, error) {
+	re := &ReceivedEventData{
+		RawAMQPMessage: newAMQPAnnotatedMessage(amqpMsg),
+	}
+
+	if len(amqpMsg.Data) == 1 {
+		re.Body = amqpMsg.Data[0]
+	}
+
+	if amqpMsg.Properties != nil {
+		if id, ok := amqpMsg.Properties.MessageID.(string); ok {
+			re.MessageID = &id
+		}
+
+		re.ContentType = amqpMsg.Properties.ContentType
+		re.CorrelationID = amqpMsg.Properties.CorrelationID
+	}
+
+	if amqpMsg.ApplicationProperties != nil {
+		re.Properties = make(map[string]any, len(amqpMsg.ApplicationProperties))
+		for key, value := range amqpMsg.ApplicationProperties {
+			re.Properties[key] = value
+		}
+	}
+
+	if err := updateFromAMQPAnnotations(amqpMsg, re); err != nil {
+		return nil, err
+	}
+
+	return re, nil
+}
+
+// the "SystemProperties" in an EventData are any annotations that are
+// NOT available at the top level as normal fields. So excluding sequence
+// number, offset, enqueued time, and partition key.
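+//
+// For example (hypothetical values): an annotations map of
+//
+//	{"x-opt-sequence-number": 5, "x-opt-offset": "100", "com.contoso:custom": "v"}
+//
+// would set SequenceNumber to 5 and Offset to 100, leaving SystemProperties
+// with only {"com.contoso:custom": "v"}.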
+func updateFromAMQPAnnotations(src *amqp.Message, dest *ReceivedEventData) error {
+	if src.Annotations == nil {
+		return nil
+	}
+
+	for kAny, v := range src.Annotations {
+		keyStr, keyIsString := kAny.(string)
+
+		if !keyIsString {
+			continue
+		}
+
+		switch keyStr {
+		case sequenceNumberAnnotation:
+			if asInt64, ok := eh.ConvertToInt64(v); ok {
+				dest.SequenceNumber = asInt64
+				continue
+			}
+
+			return errors.New("sequence number cannot be converted to an int64")
+		case partitionKeyAnnotation:
+			if asString, ok := v.(string); ok {
+				dest.PartitionKey = to.Ptr(asString)
+				continue
+			}
+
+			return errors.New("partition key cannot be converted to a string")
+		case enqueuedTimeAnnotation:
+			if asTime, ok := v.(time.Time); ok {
+				dest.EnqueuedTime = &asTime
+				continue
+			}
+
+			return errors.New("enqueued time cannot be converted to a time.Time")
+		case offsetNumberAnnotation:
+			if offsetStr, ok := v.(string); ok {
+				if offset, err := strconv.ParseInt(offsetStr, 10, 64); err == nil {
+					dest.Offset = offset
+					continue
+				}
+			}
+			return errors.New("offset cannot be converted to an int64")
+		default:
+			if dest.SystemProperties == nil {
+				dest.SystemProperties = map[string]any{}
+			}
+
+			dest.SystemProperties[keyStr] = v
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/event_data_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/event_data_batch.go
new file mode 100644
index 00000000000..edc6517b90b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/event_data_batch.go
@@ -0,0 +1,236 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap"
+	"github.com/Azure/go-amqp"
+)
+
+// ErrEventDataTooLarge is returned when a message cannot fit into a batch when using the [azeventhubs.EventDataBatch.AddEventData] function.
+var ErrEventDataTooLarge = errors.New("the EventData could not be added because it is too large for the batch")
+
+type (
+	// EventDataBatch is used to efficiently pack up EventData before sending it to Event Hubs.
+	//
+	// EventDataBatch's are not meant to be created directly. Use [ProducerClient.NewEventDataBatch],
+	// which will create them with the proper size limit for your Event Hub.
+	EventDataBatch struct {
+		mu sync.RWMutex
+
+		marshaledMessages [][]byte
+		batchEnvelope     *amqp.Message
+
+		maxBytes    uint64
+		currentSize uint64
+
+		partitionID  *string
+		partitionKey *string
+	}
+)
+
+const (
+	batchMessageFormat uint32 = 0x80013700
+)
+
+// AddEventDataOptions contains optional parameters for the AddEventData function.
+type AddEventDataOptions struct {
+	// For future expansion
+}
+
+// AddEventData adds an EventData to the batch, failing if the EventData would
+// cause the EventDataBatch to be too large to send.
+//
+// This size limit was set when the EventDataBatch was created, in options to
+// [ProducerClient.NewEventDataBatch], or (by default) from Event
+// Hubs itself.
+//
+// Returns ErrEventDataTooLarge if the event cannot fit, or a non-nil error for
+// other failures.
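+//
+// A hedged usage sketch (assuming a ProducerClient named pc and a slice of
+// *EventData named events, neither of which is shown here): add events until
+// the batch is full, send it, then start a new batch with the event that did
+// not fit:
+//
+//	batch, err := pc.NewEventDataBatch(ctx, nil)
+//	// handle err
+//	for _, event := range events {
+//		if err := batch.AddEventData(event, nil); errors.Is(err, ErrEventDataTooLarge) {
+//			// errors from SendEventDataBatch/NewEventDataBatch/AddEventData
+//			// are elided here for brevity
+//			_ = pc.SendEventDataBatch(ctx, batch, nil)
+//			batch, _ = pc.NewEventDataBatch(ctx, nil)
+//			_ = batch.AddEventData(event, nil)
+//		} else if err != nil {
+//			// handle err
+//		}
+//	}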
+func (b *EventDataBatch) AddEventData(ed *EventData, options *AddEventDataOptions) error {
+	return b.addAMQPMessage(ed.toAMQPMessage())
+}
+
+// AddAMQPAnnotatedMessage adds an AMQPAnnotatedMessage to the batch, failing
+// if the AMQPAnnotatedMessage would cause the EventDataBatch to be too large to send.
+//
+// This size limit was set when the EventDataBatch was created, in options to
+// [ProducerClient.NewEventDataBatch], or (by default) from Event
+// Hubs itself.
+//
+// Returns ErrEventDataTooLarge if the message cannot fit, or a non-nil error for
+// other failures.
+func (b *EventDataBatch) AddAMQPAnnotatedMessage(annotatedMessage *AMQPAnnotatedMessage, options *AddEventDataOptions) error {
+	return b.addAMQPMessage(annotatedMessage.toAMQPMessage())
+}
+
+// NumBytes is the number of bytes in the batch.
+func (b *EventDataBatch) NumBytes() uint64 {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	return b.currentSize
+}
+
+// NumEvents returns the number of events in the batch.
+func (b *EventDataBatch) NumEvents() int32 {
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	return int32(len(b.marshaledMessages))
+}
+
+// toAMQPMessage converts this batch into a sendable *amqp.Message
+// NOTE: not idempotent!
+func (b *EventDataBatch) toAMQPMessage() (*amqp.Message, error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if len(b.marshaledMessages) == 0 {
+		return nil, internal.NewErrNonRetriable("batch is nil or empty")
+	}
+
+	b.batchEnvelope.Data = make([][]byte, len(b.marshaledMessages))
+	b.batchEnvelope.Format = batchMessageFormat
+
+	if b.partitionKey != nil {
+		if b.batchEnvelope.Annotations == nil {
+			b.batchEnvelope.Annotations = make(amqp.Annotations)
+		}
+
+		b.batchEnvelope.Annotations[partitionKeyAnnotation] = *b.partitionKey
+	}
+
+	copy(b.batchEnvelope.Data, b.marshaledMessages)
+	return b.batchEnvelope, nil
+}
+
+func (b *EventDataBatch) addAMQPMessage(msg *amqp.Message) error {
+	if msg.Properties.MessageID == nil || msg.Properties.MessageID == "" {
+		uid, err := uuid.New()
+		if err != nil {
+			return err
+		}
+		msg.Properties.MessageID = uid.String()
+	}
+
+	if b.partitionKey != nil {
+		if msg.Annotations == nil {
+			msg.Annotations = make(amqp.Annotations)
+		}
+
+		msg.Annotations[partitionKeyAnnotation] = *b.partitionKey
+	}
+
+	bin, err := msg.MarshalBinary()
+	if err != nil {
+		return err
+	}
+
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	if len(b.marshaledMessages) == 0 {
+		// the first message is special - we use its properties and annotations as the
+		// actual envelope for the batch message.
+		batchEnv, batchEnvLen, err := createBatchEnvelope(msg)
+
+		if err != nil {
+			return err
+		}
+
+		// (we'll undo this if it turns out the message was too big)
+		b.currentSize = uint64(batchEnvLen)
+		b.batchEnvelope = batchEnv
+	}
+
+	actualPayloadSize := calcActualSizeForPayload(bin)
+
+	if b.currentSize+actualPayloadSize > b.maxBytes {
+		if len(b.marshaledMessages) == 0 {
+			// reset our properties, this didn't end up being our first message.
+			b.currentSize = 0
+			b.batchEnvelope = nil
+		}
+
+		return ErrEventDataTooLarge
+	}
+
+	b.currentSize += actualPayloadSize
+	b.marshaledMessages = append(b.marshaledMessages, bin)
+
+	return nil
+}
+
+// createBatchEnvelope makes a copy of the properties of the message, minus any
+// payload fields (like Data, Value or Sequence). The data field will be
+// filled in with all the messages when the batch is completed.
+func createBatchEnvelope(am *amqp.Message) (*amqp.Message, int, error) { + batchEnvelope := *am + + batchEnvelope.Data = nil + batchEnvelope.Value = nil + batchEnvelope.Sequence = nil + + bytes, err := batchEnvelope.MarshalBinary() + + if err != nil { + return nil, 0, err + } + + return &batchEnvelope, len(bytes), nil +} + +// calcActualSizeForPayload calculates the payload size based +// on overhead from AMQP encoding. +func calcActualSizeForPayload(payload []byte) uint64 { + const vbin8Overhead = 5 + const vbin32Overhead = 8 + + if len(payload) < 256 { + return uint64(vbin8Overhead + len(payload)) + } + + return uint64(vbin32Overhead + len(payload)) +} + +func newEventDataBatch(sender amqpwrap.AMQPSenderCloser, options *EventDataBatchOptions) (*EventDataBatch, error) { + if options == nil { + options = &EventDataBatchOptions{} + } + + if options.PartitionID != nil && options.PartitionKey != nil { + return nil, errors.New("either PartitionID or PartitionKey can be set, but not both") + } + + var batch EventDataBatch + + if options.PartitionID != nil { + // they want to send to a particular partition. The batch size should be the same for any + // link but we might as well use the one they're going to send to. + pid := *options.PartitionID + batch.partitionID = &pid + } else if options.PartitionKey != nil { + partKey := *options.PartitionKey + batch.partitionKey = &partKey + } + + if options.MaxBytes == 0 { + batch.maxBytes = sender.MaxMessageSize() + return &batch, nil + } + + if options.MaxBytes > sender.MaxMessageSize() { + return nil, internal.NewErrNonRetriable(fmt.Sprintf("maximum message size for batch was set to %d bytes, which is larger than the maximum size allowed by link (%d)", options.MaxBytes, sender.MaxMessageSize())) + } + + batch.maxBytes = options.MaxBytes + return &batch, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpInterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpInterfaces.go new file mode 100644 index 00000000000..f6ea7f0cc37 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpInterfaces.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" +) + +type AMQPReceiver = amqpwrap.AMQPReceiver +type AMQPReceiverCloser = amqpwrap.AMQPReceiverCloser +type AMQPSender = amqpwrap.AMQPSender +type AMQPSenderCloser = amqpwrap.AMQPSenderCloser + +// Closeable is implemented by pretty much any AMQP link/client +// including our own higher level Receiver/Sender. +type Closeable interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqp_fakes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqp_fakes.go new file mode 100644 index 00000000000..a08b084f767 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqp_fakes.go @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package internal + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/go-amqp" +) + +type FakeNSForPartClient struct { + NamespaceForAMQPLinks + + Receiver *FakeAMQPReceiver + NewReceiverErr error + NewReceiverCalled int + + Sender *FakeAMQPSender + NewSenderErr error + NewSenderCalled int + + RecoverFn func(ctx context.Context, clientRevision uint64) error +} + +type FakeAMQPSession struct { + amqpwrap.AMQPSession + NS *FakeNSForPartClient + CloseCalled int +} + +type FakeAMQPReceiver struct { + amqpwrap.AMQPReceiverCloser + + // ActiveCredits are incremented and decremented by IssueCredit and Receive. + ActiveCredits int32 + + // IssuedCredit just accumulates, so we can get an idea of how many credits we issued overall. + IssuedCredit []uint32 + + // CreditsSetFromOptions is similar to issuedCredit, but only tracks credits added in via the LinkOptions.Credit + // field (ie, enabling prefetch). + CreditsSetFromOptions int32 + + // ManualCreditsSetFromOptions is the value of the LinkOptions.ManualCredits value. + ManualCreditsSetFromOptions bool + + Messages []*amqp.Message + + NameForLink string + + CloseCalled int + CloseError error +} + +func (ns *FakeNSForPartClient) Recover(ctx context.Context, clientRevision uint64) error { + return ns.RecoverFn(ctx, clientRevision) +} + +func (ns *FakeNSForPartClient) NegotiateClaim(ctx context.Context, entityPath string) (context.CancelFunc, <-chan struct{}, error) { + ctx, cancel := context.WithCancel(ctx) + return cancel, ctx.Done(), nil +} + +func (ns *FakeNSForPartClient) NewAMQPSession(ctx context.Context) (amqpwrap.AMQPSession, uint64, error) { + return &FakeAMQPSession{ + NS: ns, + }, 1, nil +} + +func (sess *FakeAMQPSession) NewReceiver(ctx context.Context, source string, partitionID string, opts *amqp.ReceiverOptions) (amqpwrap.AMQPReceiverCloser, error) { + sess.NS.NewReceiverCalled++ + sess.NS.Receiver.ManualCreditsSetFromOptions = opts.Credit == -1 + sess.NS.Receiver.CreditsSetFromOptions = opts.Credit + + if opts.Credit > 0 { + sess.NS.Receiver.ActiveCredits = opts.Credit + } + + return sess.NS.Receiver, sess.NS.NewReceiverErr +} + +func (sess *FakeAMQPSession) NewSender(ctx context.Context, target string, partitionID string, opts *amqp.SenderOptions) (AMQPSenderCloser, error) { + sess.NS.NewSenderCalled++ + return sess.NS.Sender, sess.NS.NewSenderErr +} + +func (sess *FakeAMQPSession) Close(ctx context.Context) error { + sess.CloseCalled++ + return nil +} + +func (r *FakeAMQPReceiver) Credits() uint32 { + return uint32(r.ActiveCredits) +} + +func (r *FakeAMQPReceiver) IssueCredit(credit uint32) error { + r.ActiveCredits += int32(credit) + r.IssuedCredit = append(r.IssuedCredit, credit) + return nil +} + +func (r *FakeAMQPReceiver) LinkName() string { + return r.NameForLink +} + +func (r *FakeAMQPReceiver) Receive(ctx context.Context, o *amqp.ReceiveOptions) (*amqp.Message, error) { + if len(r.Messages) > 0 { + r.ActiveCredits-- + m := r.Messages[0] + r.Messages = r.Messages[1:] + return m, nil + } else { + <-ctx.Done() + return nil, ctx.Err() + } +} + +func (r *FakeAMQPReceiver) Close(ctx context.Context) error { + r.CloseCalled++ + return r.CloseError +} + +type FakeAMQPSender struct { + amqpwrap.AMQPSenderCloser + CloseCalled int + CloseError error +} + +func (s *FakeAMQPSender) Close(ctx context.Context) error { + s.CloseCalled++ + return s.CloseError +} + +type fakeAMQPClient struct { + amqpwrap.AMQPClient + closeCalled int + session *FakeAMQPSession +} 
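+
+// fakeAMQPClient satisfies amqpwrap.AMQPClient in tests: NewSession always
+// returns the canned *FakeAMQPSession, and Close just counts invocations.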
+ +func (f *fakeAMQPClient) NewSession(ctx context.Context, opts *amqp.SessionOptions) (amqpwrap.AMQPSession, error) { + return f.session, nil +} + +func (f *fakeAMQPClient) Close() error { + f.closeCalled++ + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go new file mode 100644 index 00000000000..750b80c55ea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go @@ -0,0 +1,307 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package amqpwrap has some simple wrappers to make it easier to +// abstract the go-amqp types. +package amqpwrap + +import ( + "context" + "errors" + "time" + + "github.com/Azure/go-amqp" +) + +// AMQPReceiver is implemented by *amqp.Receiver +type AMQPReceiver interface { + IssueCredit(credit uint32) error + Receive(ctx context.Context, o *amqp.ReceiveOptions) (*amqp.Message, error) + Prefetched() *amqp.Message + + // settlement functions + AcceptMessage(ctx context.Context, msg *amqp.Message) error + RejectMessage(ctx context.Context, msg *amqp.Message, e *amqp.Error) error + ReleaseMessage(ctx context.Context, msg *amqp.Message) error + ModifyMessage(ctx context.Context, msg *amqp.Message, options *amqp.ModifyMessageOptions) error + + LinkName() string + LinkSourceFilterValue(name string) any + + // wrapper only functions + + // Credits returns the # of credits still active on this link. + Credits() uint32 + + ConnID() uint64 +} + +// AMQPReceiverCloser is implemented by *amqp.Receiver +type AMQPReceiverCloser interface { + AMQPReceiver + Close(ctx context.Context) error +} + +// AMQPSender is implemented by *amqp.Sender +type AMQPSender interface { + Send(ctx context.Context, msg *amqp.Message, o *amqp.SendOptions) error + MaxMessageSize() uint64 + LinkName() string + ConnID() uint64 +} + +// AMQPSenderCloser is implemented by *amqp.Sender +type AMQPSenderCloser interface { + AMQPSender + Close(ctx context.Context) error +} + +// AMQPSession is a simple interface, implemented by *AMQPSessionWrapper. +// It exists only so we can return AMQPReceiver/AMQPSender interfaces. 
+type AMQPSession interface {
+	Close(ctx context.Context) error
+	ConnID() uint64
+	NewReceiver(ctx context.Context, source string, partitionID string, opts *amqp.ReceiverOptions) (AMQPReceiverCloser, error)
+	NewSender(ctx context.Context, target string, partitionID string, opts *amqp.SenderOptions) (AMQPSenderCloser, error)
+}
+
+type AMQPClient interface {
+	Close() error
+	NewSession(ctx context.Context, opts *amqp.SessionOptions) (AMQPSession, error)
+	ID() uint64
+}
+
+type goamqpConn interface {
+	NewSession(ctx context.Context, opts *amqp.SessionOptions) (*amqp.Session, error)
+	Close() error
+}
+
+type goamqpSession interface {
+	Close(ctx context.Context) error
+	NewReceiver(ctx context.Context, source string, opts *amqp.ReceiverOptions) (*amqp.Receiver, error)
+	NewSender(ctx context.Context, target string, opts *amqp.SenderOptions) (*amqp.Sender, error)
+}
+
+type goamqpReceiver interface {
+	IssueCredit(credit uint32) error
+	Receive(ctx context.Context, o *amqp.ReceiveOptions) (*amqp.Message, error)
+	Prefetched() *amqp.Message
+
+	// settlement functions
+	AcceptMessage(ctx context.Context, msg *amqp.Message) error
+	RejectMessage(ctx context.Context, msg *amqp.Message, e *amqp.Error) error
+	ReleaseMessage(ctx context.Context, msg *amqp.Message) error
+	ModifyMessage(ctx context.Context, msg *amqp.Message, options *amqp.ModifyMessageOptions) error
+
+	LinkName() string
+	LinkSourceFilterValue(name string) any
+	Close(ctx context.Context) error
+}
+
+type goamqpSender interface {
+	Send(ctx context.Context, msg *amqp.Message, o *amqp.SendOptions) error
+	MaxMessageSize() uint64
+	LinkName() string
+	Close(ctx context.Context) error
+}
+
+// AMQPClientWrapper wraps a go-amqp connection. It exists only so we can
+// return AMQPSession, which itself only exists so we can return interfaces
+// for AMQPSender and AMQPReceiver from AMQPSession.
+type AMQPClientWrapper struct { + ConnID uint64 + Inner goamqpConn +} + +func (w *AMQPClientWrapper) ID() uint64 { + return w.ConnID +} + +func (w *AMQPClientWrapper) Close() error { + err := w.Inner.Close() + return WrapError(err, w.ConnID, "", "") +} + +func (w *AMQPClientWrapper) NewSession(ctx context.Context, opts *amqp.SessionOptions) (AMQPSession, error) { + sess, err := w.Inner.NewSession(ctx, opts) + + if err != nil { + return nil, WrapError(err, w.ConnID, "", "") + } + + return &AMQPSessionWrapper{ + connID: w.ConnID, + Inner: sess, + ContextWithTimeoutFn: context.WithTimeout, + }, nil +} + +type AMQPSessionWrapper struct { + connID uint64 + Inner goamqpSession + ContextWithTimeoutFn ContextWithTimeoutFn +} + +func (w *AMQPSessionWrapper) ConnID() uint64 { + return w.connID +} + +func (w *AMQPSessionWrapper) Close(ctx context.Context) error { + ctx, cancel := w.ContextWithTimeoutFn(ctx, defaultCloseTimeout) + defer cancel() + err := w.Inner.Close(ctx) + return WrapError(err, w.connID, "", "") +} + +func (w *AMQPSessionWrapper) NewReceiver(ctx context.Context, source string, partitionID string, opts *amqp.ReceiverOptions) (AMQPReceiverCloser, error) { + receiver, err := w.Inner.NewReceiver(ctx, source, opts) + + if err != nil { + return nil, WrapError(err, w.connID, "", partitionID) + } + + return &AMQPReceiverWrapper{ + connID: w.connID, + partitionID: partitionID, + Inner: receiver, + ContextWithTimeoutFn: context.WithTimeout}, nil +} + +func (w *AMQPSessionWrapper) NewSender(ctx context.Context, target string, partitionID string, opts *amqp.SenderOptions) (AMQPSenderCloser, error) { + sender, err := w.Inner.NewSender(ctx, target, opts) + + if err != nil { + return nil, WrapError(err, w.connID, "", partitionID) + } + + return &AMQPSenderWrapper{ + connID: w.connID, + partitionID: partitionID, + Inner: sender, + ContextWithTimeoutFn: context.WithTimeout}, nil +} + +type AMQPReceiverWrapper struct { + connID uint64 + partitionID string + Inner goamqpReceiver + credits uint32 + ContextWithTimeoutFn ContextWithTimeoutFn +} + +func (rw *AMQPReceiverWrapper) ConnID() uint64 { + return rw.connID +} + +func (rw *AMQPReceiverWrapper) Credits() uint32 { + return rw.credits +} + +func (rw *AMQPReceiverWrapper) IssueCredit(credit uint32) error { + err := rw.Inner.IssueCredit(credit) + + if err == nil { + rw.credits += credit + } + + return WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) +} + +func (rw *AMQPReceiverWrapper) Receive(ctx context.Context, o *amqp.ReceiveOptions) (*amqp.Message, error) { + message, err := rw.Inner.Receive(ctx, o) + + if err != nil { + return nil, WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) + } + + rw.credits-- + return message, nil +} + +func (rw *AMQPReceiverWrapper) Prefetched() *amqp.Message { + msg := rw.Inner.Prefetched() + + if msg == nil { + return nil + } + + rw.credits-- + return msg +} + +// settlement functions +func (rw *AMQPReceiverWrapper) AcceptMessage(ctx context.Context, msg *amqp.Message) error { + err := rw.Inner.AcceptMessage(ctx, msg) + return WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) +} + +func (rw *AMQPReceiverWrapper) RejectMessage(ctx context.Context, msg *amqp.Message, e *amqp.Error) error { + err := rw.Inner.RejectMessage(ctx, msg, e) + return WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) +} + +func (rw *AMQPReceiverWrapper) ReleaseMessage(ctx context.Context, msg *amqp.Message) error { + err := rw.Inner.ReleaseMessage(ctx, msg) + return WrapError(err, rw.connID, rw.LinkName(), 
rw.partitionID) +} + +func (rw *AMQPReceiverWrapper) ModifyMessage(ctx context.Context, msg *amqp.Message, options *amqp.ModifyMessageOptions) error { + err := rw.Inner.ModifyMessage(ctx, msg, options) + return WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) +} + +func (rw *AMQPReceiverWrapper) LinkName() string { + return rw.Inner.LinkName() +} + +func (rw *AMQPReceiverWrapper) LinkSourceFilterValue(name string) any { + return rw.Inner.LinkSourceFilterValue(name) +} + +func (rw *AMQPReceiverWrapper) Close(ctx context.Context) error { + ctx, cancel := rw.ContextWithTimeoutFn(ctx, defaultCloseTimeout) + defer cancel() + err := rw.Inner.Close(ctx) + + return WrapError(err, rw.connID, rw.LinkName(), rw.partitionID) +} + +type AMQPSenderWrapper struct { + connID uint64 + partitionID string + Inner goamqpSender + ContextWithTimeoutFn ContextWithTimeoutFn +} + +func (sw *AMQPSenderWrapper) ConnID() uint64 { + return sw.connID +} + +func (sw *AMQPSenderWrapper) Send(ctx context.Context, msg *amqp.Message, o *amqp.SendOptions) error { + err := sw.Inner.Send(ctx, msg, o) + return WrapError(err, sw.connID, sw.LinkName(), sw.partitionID) +} + +func (sw *AMQPSenderWrapper) MaxMessageSize() uint64 { + return sw.Inner.MaxMessageSize() +} + +func (sw *AMQPSenderWrapper) LinkName() string { + return sw.Inner.LinkName() +} + +func (sw *AMQPSenderWrapper) Close(ctx context.Context) error { + ctx, cancel := sw.ContextWithTimeoutFn(ctx, defaultCloseTimeout) + defer cancel() + err := sw.Inner.Close(ctx) + + return WrapError(err, sw.connID, sw.LinkName(), sw.partitionID) +} + +var ErrConnResetNeeded = errors.New("connection must be reset, link/connection state may be inconsistent") + +const defaultCloseTimeout = time.Minute + +// ContextWithTimeoutFn matches the signature for `context.WithTimeout` and is used when we want to +// stub things out for tests. +type ContextWithTimeoutFn func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/error.go new file mode 100644 index 00000000000..5953fd18c37 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/error.go @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package amqpwrap + +import ( + "errors" +) + +// Error is a wrapper that has the context of which connection and +// link the error happened with. 
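+//
+// Error forwards errors.Is and errors.As to the wrapped error (see the Is/As
+// methods below), so callers can still match the underlying cause while
+// keeping the ConnID/LinkName/PartitionID context. A hedged sketch:
+//
+//	var amqpErr *amqp.Error
+//	if errors.As(err, &amqpErr) {
+//		// amqpErr.Condition is available even though err is an amqpwrap.Error
+//	}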
+type Error struct { + ConnID uint64 + LinkName string + PartitionID string + Err error +} + +func (e Error) Error() string { + return e.Err.Error() +} + +func (e Error) As(target any) bool { + return errors.As(e.Err, target) +} + +func (e Error) Is(target error) bool { + return errors.Is(e.Err, target) +} + +func WrapError(err error, connID uint64, linkName string, partitionID string) error { + if err == nil { + return nil + } + + return Error{ + ConnID: connID, + LinkName: linkName, + PartitionID: partitionID, + Err: err, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go new file mode 100644 index 00000000000..ced17fbc493 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package amqpwrap + +import ( + "context" + + "github.com/Azure/go-amqp" +) + +// RPCResponse is the simplified response structure from an RPC like call +type RPCResponse struct { + // Code is the response code - these originate from Service Bus. Some + // common values are called out below, with the RPCResponseCode* constants. + Code int + Description string + Message *amqp.Message +} + +// RPCLink is implemented by *rpc.Link +type RPCLink interface { + Close(ctx context.Context) error + ConnID() uint64 + RPC(ctx context.Context, msg *amqp.Message) (*RPCResponse, error) + LinkName() string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth/token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth/token.go new file mode 100644 index 00000000000..9aed3b521d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth/token.go @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package auth provides an abstraction over claims-based security for Azure Event Hub and Service Bus. +package auth + +const ( + // CBSTokenTypeJWT is the type of token to be used for JWTs. For example Azure Active Directory tokens. + CBSTokenTypeJWT TokenType = "jwt" + // CBSTokenTypeSAS is the type of token to be used for SAS tokens. + CBSTokenTypeSAS TokenType = "servicebus.windows.net:sastoken" +) + +type ( + // TokenType represents types of tokens known for claims-based auth + TokenType string + + // Token contains all of the information to negotiate authentication + Token struct { + // TokenType is the type of CBS token + TokenType TokenType + Token string + Expiry string + } + + // TokenProvider abstracts the fetching of authentication tokens + TokenProvider interface { + GetToken(uri string) (*Token, error) + } +) + +// NewToken constructs a new auth token +func NewToken(tokenType TokenType, token, expiry string) *Token { + return &Token{ + TokenType: tokenType, + Token: token, + Expiry: expiry, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/cbs.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/cbs.go new file mode 100644 index 00000000000..103f71a9a92 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/cbs.go @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package internal + +import ( + "context" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" + "github.com/Azure/go-amqp" +) + +const ( + cbsAddress = "$cbs" + cbsOperationKey = "operation" + cbsOperationPutToken = "put-token" + cbsTokenTypeKey = "type" + cbsAudienceKey = "name" + cbsExpirationKey = "expiration" +) + +// NegotiateClaim attempts to put a token to the $cbs management endpoint to negotiate auth for the given audience +func NegotiateClaim(ctx context.Context, audience string, conn amqpwrap.AMQPClient, provider auth.TokenProvider) error { + link, err := NewRPCLink(ctx, RPCLinkArgs{ + Client: conn, + Address: cbsAddress, + LogEvent: exported.EventAuth, + }) + + if err != nil { + // In some circumstances we can end up in a situation where the link closing was cancelled + // or interrupted, leaving $cbs still open by some dangling receiver or sender. The only way + // to fix this is to restart the connection. + if IsNotAllowedError(err) { + azlog.Writef(exported.EventAuth, "Not allowed to open, connection will be reset: %s", err) + return amqpwrap.ErrConnResetNeeded + } + + return err + } + + closeLink := func(ctx context.Context, origErr error) error { + if err := link.Close(ctx); err != nil { + azlog.Writef(exported.EventAuth, "Failed closing claim link: %s", err.Error()) + return err + } + + return origErr + } + + token, err := provider.GetToken(audience) + if err != nil { + azlog.Writef(exported.EventAuth, "Failed to get token from provider: %s", err) + return closeLink(ctx, err) + } + + azlog.Writef(exported.EventAuth, "negotiating claim for audience %s with token type %s and expiry of %s", audience, token.TokenType, token.Expiry) + + msg := &amqp.Message{ + Value: token.Token, + ApplicationProperties: map[string]any{ + cbsOperationKey: cbsOperationPutToken, + cbsTokenTypeKey: string(token.TokenType), + cbsAudienceKey: audience, + cbsExpirationKey: token.Expiry, + }, + } + + if _, err := link.RPC(ctx, msg); err != nil { + azlog.Writef(exported.EventAuth, "Failed to send/receive RPC message: %s", err) + return closeLink(ctx, err) + } + + return closeLink(ctx, nil) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/constants.go new file mode 100644 index 00000000000..154eda8786c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/constants.go @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// Version is the semantic version number +const Version = "v1.2.1" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh/eh_internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh/eh_internal.go new file mode 100644 index 00000000000..17e0c7f138b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh/eh_internal.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package eh + +// ConvertToInt64 converts any int-like value to be an int64. 
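+// For example, ConvertToInt64(int32(5)) returns (5, true), while
+// ConvertToInt64("5") returns (0, false): only signed integer types are
+// converted, never strings or unsigned types.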
+func ConvertToInt64(intValue any) (int64, bool) {
+	switch v := intValue.(type) {
+	case int:
+		return int64(v), true
+	case int8:
+		return int64(v), true
+	case int16:
+		return int64(v), true
+	case int32:
+		return int64(v), true
+	case int64:
+		return int64(v), true
+	}
+
+	return 0, false
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/errors.go
new file mode 100644
index 00000000000..d86c09f8bf6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/errors.go
@@ -0,0 +1,265 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+	"context"
+	"errors"
+	"io"
+	"net"
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported"
+	"github.com/Azure/go-amqp"
+)
+
+type errNonRetriable struct {
+	Message string
+}
+
+func NewErrNonRetriable(message string) error {
+	return errNonRetriable{Message: message}
+}
+
+func (e errNonRetriable) Error() string { return e.Message }
+
+// RecoveryKind dictates what kind of recovery is possible. Used with
+// GetRecoveryKind().
+type RecoveryKind string
+
+const (
+	RecoveryKindNone  RecoveryKind = ""
+	RecoveryKindFatal RecoveryKind = "fatal"
+	RecoveryKindLink  RecoveryKind = "link"
+	RecoveryKindConn  RecoveryKind = "connection"
+)
+
+func IsFatalEHError(err error) bool {
+	return GetRecoveryKind(err) == RecoveryKindFatal
+}
+
+// TransformError will create a proper error type that users
+// can potentially inspect.
+// If the error is actionable then it'll be of type exported.Error which
+// has a 'Code' field that can be used programmatically.
+// If it's not actionable or if it's nil it'll just be returned.
+func TransformError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	_, ok := err.(*exported.Error)
+
+	if ok {
+		// it's already been wrapped.
+		return err
+	}
+
+	if IsOwnershipLostError(err) {
+		return exported.NewError(exported.ErrorCodeOwnershipLost, err)
+	}
+
+	// there are a few errors that all boil down to "bad creds or unauthorized"
+	var amqpErr *amqp.Error
+
+	if errors.As(err, &amqpErr) && amqpErr.Condition == amqp.ErrCondUnauthorizedAccess {
+		return exported.NewError(exported.ErrorCodeUnauthorizedAccess, err)
+	}
+
+	var rpcErr RPCError
+	if errors.As(err, &rpcErr) && rpcErr.Resp.Code == http.StatusUnauthorized {
+		return exported.NewError(exported.ErrorCodeUnauthorizedAccess, err)
+	}
+
+	rk := GetRecoveryKind(err)
+
+	switch rk {
+	case RecoveryKindLink:
+		// note that we could give back a more differentiated error code
+		// here but it's probably best to just give the customer the simplest
+		// recovery mechanism possible.
+		return exported.NewError(exported.ErrorCodeConnectionLost, err)
+	case RecoveryKindConn:
+		return exported.NewError(exported.ErrorCodeConnectionLost, err)
+	default:
+		// isn't one of our specifically called out cases so we'll just return it.
+ return err + } +} + +func IsQuickRecoveryError(err error) bool { + if IsOwnershipLostError(err) { + return false + } + + var de *amqp.LinkError + return errors.As(err, &de) +} + +func IsCancelError(err error) bool { + if err == nil { + return false + } + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return true + } + + if err.Error() == "context canceled" { // go-amqp is returning this when I cancel + return true + } + + return false +} + +const errorConditionLockLost = amqp.ErrCond("com.microsoft:message-lock-lost") + +var amqpConditionsToRecoveryKind = map[amqp.ErrCond]RecoveryKind{ + // no recovery needed, these are temporary errors. + amqp.ErrCond("com.microsoft:server-busy"): RecoveryKindNone, + amqp.ErrCond("com.microsoft:timeout"): RecoveryKindNone, + amqp.ErrCond("com.microsoft:operation-cancelled"): RecoveryKindNone, + + // Link recovery needed + amqp.ErrCondDetachForced: RecoveryKindLink, // "amqp:link:detach-forced" + amqp.ErrCondTransferLimitExceeded: RecoveryKindLink, // "amqp:link:transfer-limit-exceeded" + + // Connection recovery needed + amqp.ErrCondConnectionForced: RecoveryKindConn, // "amqp:connection:forced" + amqp.ErrCondInternalError: RecoveryKindConn, // "amqp:internal-error" + + // No recovery possible - this operation is non retriable. + + // ErrCondResourceLimitExceeded comes back if the entity is actually full. + amqp.ErrCondResourceLimitExceeded: RecoveryKindFatal, // "amqp:resource-limit-exceeded" + amqp.ErrCondMessageSizeExceeded: RecoveryKindFatal, // "amqp:link:message-size-exceeded" + amqp.ErrCondUnauthorizedAccess: RecoveryKindFatal, // creds are bad + amqp.ErrCondNotFound: RecoveryKindFatal, // "amqp:not-found" + amqp.ErrCondNotAllowed: RecoveryKindFatal, // "amqp:not-allowed" + amqp.ErrCond("com.microsoft:entity-disabled"): RecoveryKindFatal, // entity is disabled in the portal + amqp.ErrCond("com.microsoft:session-cannot-be-locked"): RecoveryKindFatal, + amqp.ErrCond("com.microsoft:argument-out-of-range"): RecoveryKindFatal, // asked for a partition ID that doesn't exist + errorConditionLockLost: RecoveryKindFatal, +} + +// GetRecoveryKind determines the recovery type for non-session based links. +func GetRecoveryKind(err error) RecoveryKind { + if err == nil { + return RecoveryKindNone + } + + if errors.Is(err, RPCLinkClosedErr) { + return RecoveryKindFatal + } + + if IsCancelError(err) { + return RecoveryKindFatal + } + + if errors.Is(err, amqpwrap.ErrConnResetNeeded) { + return RecoveryKindConn + } + + var netErr net.Error + + // these are errors that can flow from the go-amqp connection to + // us. There's work underway to improve this but for now we can handle + // these as "catastrophic" errors and reset everything. + if errors.Is(err, io.EOF) || errors.As(err, &netErr) { + return RecoveryKindConn + } + + var errNonRetriable errNonRetriable + + if errors.As(err, &errNonRetriable) { + return RecoveryKindFatal + } + + // azidentity returns errors that match this for auth failures. + var errNonRetriableMarker interface { + NonRetriable() + error + } + + if errors.As(err, &errNonRetriableMarker) { + return RecoveryKindFatal + } + + if IsOwnershipLostError(err) { + return RecoveryKindFatal + } + + // check the "special" AMQP errors that aren't condition-based. + if IsQuickRecoveryError(err) { + return RecoveryKindLink + } + + var connErr *amqp.ConnError + var sessionErr *amqp.SessionError + + if errors.As(err, &connErr) || + // session closures appear to leak through when the connection itself is going down. 
+ errors.As(err, &sessionErr) { + return RecoveryKindConn + } + + // then it's _probably_ an actual *amqp.Error, in which case we bucket it by + // the 'condition'. + var amqpError *amqp.Error + + if errors.As(err, &amqpError) { + recoveryKind, ok := amqpConditionsToRecoveryKind[amqpError.Condition] + + if ok { + return recoveryKind + } + } + + var rpcErr RPCError + + if errors.As(err, &rpcErr) { + // Described more here: + // https://www.oasis-open.org/committees/download.php/54441/AMQP%20Management%20v1.0%20WD09 + // > Unsuccessful operations MUST NOT result in a statusCode in the 2xx range as defined in Section 10.2 of [RFC2616] + // RFC2616 is the specification for HTTP. + code := rpcErr.RPCCode() + + if code == http.StatusNotFound || + code == http.StatusUnauthorized { + return RecoveryKindFatal + } + + // simple timeouts + if rpcErr.Resp.Code == http.StatusRequestTimeout || rpcErr.Resp.Code == http.StatusServiceUnavailable || + // internal server errors are worth retrying (they will typically lead + // to a more actionable error). A simple example of this is when you're + // in the middle of an operation and the link is detached. Sometimes you'll get + // the detached event immediately, but sometimes you'll get an intermediate 500 + // indicating your original operation was cancelled. + rpcErr.Resp.Code == http.StatusInternalServerError { + return RecoveryKindNone + } + } + + // this is some error type we've never seen - recover the entire connection. + return RecoveryKindConn +} + +func IsNotAllowedError(err error) bool { + var e *amqp.Error + + return errors.As(err, &e) && + e.Condition == amqp.ErrCondNotAllowed +} + +func IsOwnershipLostError(err error) bool { + var de *amqp.LinkError + + if errors.As(err, &de) { + return de.RemoteErr != nil && de.RemoteErr.Condition == "amqp:link:stolen" + } + + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/connection_string_properties.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/connection_string_properties.go new file mode 100644 index 00000000000..b77d22305c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/connection_string_properties.go @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" +) + +// ConnectionStringProperties are the properties of a connection string +// as returned by [ParseConnectionString]. +type ConnectionStringProperties struct { + // Endpoint is the Endpoint value in the connection string. + // Ex: sb://example.servicebus.windows.net + Endpoint string + + // EntityPath is EntityPath value in the connection string. + EntityPath *string + + // FullyQualifiedNamespace is the Endpoint value without the protocol scheme. + // Ex: example.servicebus.windows.net + FullyQualifiedNamespace string + + // SharedAccessKey is the SharedAccessKey value in the connection string. + SharedAccessKey *string + + // SharedAccessKeyName is the SharedAccessKeyName value in the connection string. + SharedAccessKeyName *string + + // SharedAccessSignature is the SharedAccessSignature value in the connection string. 
+	SharedAccessSignature *string
+
+	// Emulator indicates that the connection string is for an emulator:
+	// ex: Endpoint=localhost:6765;SharedAccessKeyName=<< REDACTED >>;SharedAccessKey=<< REDACTED >>;UseDevelopmentEmulator=true
+	Emulator bool
+}
+
+// ParseConnectionString takes a connection string from the Azure portal and returns the
+// parsed representation.
+//
+// There are two supported formats:
+//
+//  1. Connection strings generated from the portal (or elsewhere) that contain an embedded key and keyname.
+//
+//  2. A connection string with an embedded SharedAccessSignature:
+//     Endpoint=sb://<namespace>.servicebus.windows.net;SharedAccessSignature=SharedAccessSignature sr=<namespace>.servicebus.windows.net&sig=<base64-sig>&se=<expiry>&skn=<keyname>"
+func ParseConnectionString(connStr string) (ConnectionStringProperties, error) {
+	const (
+		endpointKey              = "Endpoint"
+		sharedAccessKeyNameKey   = "SharedAccessKeyName"
+		sharedAccessKeyKey       = "SharedAccessKey"
+		entityPathKey            = "EntityPath"
+		sharedAccessSignatureKey = "SharedAccessSignature"
+		useEmulator              = "UseDevelopmentEmulator"
+	)
+
+	csp := ConnectionStringProperties{}
+
+	splits := strings.Split(connStr, ";")
+
+	for _, split := range splits {
+		if split == "" {
+			continue
+		}
+
+		keyAndValue := strings.SplitN(split, "=", 2)
+		if len(keyAndValue) < 2 {
+			return ConnectionStringProperties{}, errors.New("failed parsing connection string due to unmatched key value separated by '='")
+		}
+
+		// if a key value pair has `=` in the value, recombine them
+		key := keyAndValue[0]
+		value := strings.Join(keyAndValue[1:], "=")
+		switch {
+		case strings.EqualFold(endpointKey, key):
+			u, err := url.Parse(value)
+			if err != nil {
+				return ConnectionStringProperties{}, errors.New("failed parsing connection string due to an incorrectly formatted Endpoint value")
+			}
+			csp.Endpoint = value
+			csp.FullyQualifiedNamespace = u.Host
+		case strings.EqualFold(sharedAccessKeyNameKey, key):
+			csp.SharedAccessKeyName = &value
+		case strings.EqualFold(sharedAccessKeyKey, key):
+			csp.SharedAccessKey = &value
+		case strings.EqualFold(entityPathKey, key):
+			csp.EntityPath = &value
+		case strings.EqualFold(sharedAccessSignatureKey, key):
+			csp.SharedAccessSignature = &value
+		case strings.EqualFold(useEmulator, key):
+			v, err := strconv.ParseBool(value)
+
+			if err != nil {
+				return ConnectionStringProperties{}, err
+			}
+
+			csp.Emulator = v
+		}
+	}
+
+	if csp.Emulator {
+		endpointParts := strings.SplitN(csp.Endpoint, ":", 3) // allow for a port, if it exists.
+
+		if len(endpointParts) < 2 || endpointParts[0] != "sb" {
+			// there should always be at least two parts "sb:" and "//<hostname>"
+			// with an optional 3rd piece that's the port "1111".
+			// (we don't need to validate it's a valid host since it's been through url.Parse() above)
+			return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb://<hostname> or sb://<hostname>:<port>, not %s", csp.Endpoint)
+		}
+	}
+
+	if csp.FullyQualifiedNamespace == "" {
+		return ConnectionStringProperties{}, fmt.Errorf("key %q must not be empty", endpointKey)
+	}
+
+	if csp.SharedAccessSignature == nil && csp.SharedAccessKeyName == nil {
+		return ConnectionStringProperties{}, fmt.Errorf("key %q must not be empty", sharedAccessKeyNameKey)
+	}
+
+	if csp.SharedAccessKey == nil && csp.SharedAccessSignature == nil {
+		return ConnectionStringProperties{}, fmt.Errorf("key %q or %q cannot both be empty", sharedAccessKeyKey, sharedAccessSignatureKey)
+	}
+
+	return csp, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/error.go
new file mode 100644
index 00000000000..23a920a61c1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/error.go
@@ -0,0 +1,58 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import "fmt"
+
+// ErrorCode is an error code, usable by consuming code to work with
+// programmatically.
+type ErrorCode string
+
+const (
+	// ErrorCodeUnauthorizedAccess means the credentials provided are not valid for use with
+	// a particular entity, or have expired.
+	ErrorCodeUnauthorizedAccess ErrorCode = "unauthorized"
+
+	// ErrorCodeConnectionLost means our connection was lost and all retry attempts failed.
+	// This typically reflects an extended outage or connection disruption and may
+	// require manual intervention.
+	ErrorCodeConnectionLost ErrorCode = "connlost"
+
+	// ErrorCodeOwnershipLost means that a partition that you were reading from was opened
+	// by another link with an epoch/owner level greater or equal to your [PartitionClient].
+	//
+	// When using types like the [Processor], partition ownership will change as instances
+	// rebalance.
+	ErrorCodeOwnershipLost ErrorCode = "ownershiplost"
+)
+
+// Error represents an Event Hub specific error.
+// NOTE: the Code is considered part of the published API but the message that
+// comes back from Error(), as well as the underlying wrapped error, are NOT and
+// are subject to change.
+type Error struct {
+	// Code is a stable error code which can be used as part of programmatic error handling.
+	// The codes can expand in the future, but the values (and their meaning) will remain the same.
+	Code     ErrorCode
+	innerErr error
+}
+
+// Error is an error message containing the code and a user friendly message, if any.
+func (e *Error) Error() string {
+	msg := "unknown error"
+	if e.innerErr != nil {
+		msg = e.innerErr.Error()
+	}
+	return fmt.Sprintf("(%s): %s", e.Code, msg)
+}
+
+// NewError creates a new `Error` instance.
+// NOTE: this function is only exported so it can be used by the `internal`
+// package. It is not available for customers.
+func NewError(code ErrorCode, innerErr error) error { + return &Error{ + Code: code, + innerErr: innerErr, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/log_events.go new file mode 100644 index 00000000000..2c4a36f403b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/log_events.go @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azeventhubs/log.go +const ( + // EventConn is used whenever we create a connection or any links (ie: receivers, senders). + EventConn log.Event = "azeh.Conn" + + // EventAuth is used when we're doing authentication/claims negotiation. + EventAuth log.Event = "azeh.Auth" + + // EventProducer represents operations that happen on Producers. + EventProducer log.Event = "azeh.Producer" + + // EventConsumer represents operations that happen on Consumers. + EventConsumer log.Event = "azeh.Consumer" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/retry_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/retry_options.go new file mode 100644 index 00000000000..6bed306ad5c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/retry_options.go @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import "time" + +// NOTE: this is exposed via type-aliasing in azeventhubs/client.go + +// RetryOptions represent the options for retries. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 120 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/websocket_conn_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/websocket_conn_params.go new file mode 100644 index 00000000000..5bc28602450 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported/websocket_conn_params.go @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +// NOTE: this struct is exported via client.go:WebSocketConnParams + +// WebSocketConnParams are the arguments to the NewWebSocketConn function you pass if you want +// to enable websockets. 
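+//
+// A hedged sketch of such a function, assuming the third-party
+// github.com/coder/websocket package (any dialer that returns a net.Conn
+// speaking the "amqp" subprotocol should work):
+//
+//	func newWebSocketConn(ctx context.Context, params WebSocketConnParams) (net.Conn, error) {
+//		opts := &websocket.DialOptions{Subprotocols: []string{"amqp"}}
+//		wssConn, _, err := websocket.Dial(ctx, params.Host, opts)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return websocket.NetConn(ctx, wssConn, websocket.MessageBinary), nil
+//	}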
+type WebSocketConnParams struct {
+	// Host is the `wss://` host to connect to
+	Host string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links.go
new file mode 100644
index 00000000000..b20fa6f62fd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links.go
@@ -0,0 +1,395 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported"
+)
+
+type AMQPLink interface {
+	Close(ctx context.Context) error
+	LinkName() string
+}
+
+// LinksForPartitionClient are the functions that the PartitionClient uses within Links[T]
+// (for unit testing only)
+type LinksForPartitionClient[LinkT AMQPLink] interface {
+	Retry(ctx context.Context, eventName azlog.Event, operation string, partitionID string, retryOptions exported.RetryOptions, fn func(ctx context.Context, lwid LinkWithID[LinkT]) error) error
+	Close(ctx context.Context) error
+}
+
+type Links[LinkT AMQPLink] struct {
+	ns NamespaceForAMQPLinks
+
+	linksMu *sync.RWMutex
+	links   map[string]*linkState[LinkT]
+
+	managementLinkMu *sync.RWMutex
+	managementLink   *linkState[amqpwrap.RPCLink]
+
+	managementPath string
+	newLinkFn      NewLinksFn[LinkT]
+	entityPathFn   func(partitionID string) string
+
+	lr LinkRetrier[LinkT]
+	mr LinkRetrier[amqpwrap.RPCLink]
+}
+
+type NewLinksFn[LinkT AMQPLink] func(ctx context.Context, session amqpwrap.AMQPSession, entityPath string, partitionID string) (LinkT, error)
+
+func NewLinks[LinkT AMQPLink](ns NamespaceForAMQPLinks, managementPath string, entityPathFn func(partitionID string) string, newLinkFn NewLinksFn[LinkT]) *Links[LinkT] {
+	l := &Links[LinkT]{
+		ns:               ns,
+		linksMu:          &sync.RWMutex{},
+		links:            map[string]*linkState[LinkT]{},
+		managementLinkMu: &sync.RWMutex{},
+		managementPath:   managementPath,
+
+		newLinkFn:    newLinkFn,
+		entityPathFn: entityPathFn,
+	}
+
+	l.lr = LinkRetrier[LinkT]{
+		GetLink:   l.GetLink,
+		CloseLink: l.closePartitionLinkIfMatch,
+		NSRecover: l.ns.Recover,
+	}
+
+	l.mr = LinkRetrier[amqpwrap.RPCLink]{
+		GetLink: func(ctx context.Context, partitionID string) (LinkWithID[amqpwrap.RPCLink], error) {
+			return l.GetManagementLink(ctx)
+		},
+		CloseLink: func(ctx context.Context, _, linkName string) error {
+			return l.closeManagementLinkIfMatch(ctx, linkName)
+		},
+		NSRecover: l.ns.Recover,
+	}
+
+	return l
+}
+
+func (l *Links[LinkT]) RetryManagement(ctx context.Context, eventName azlog.Event, operation string, retryOptions exported.RetryOptions, fn func(ctx context.Context, lwid LinkWithID[amqpwrap.RPCLink]) error) error {
+	return l.mr.Retry(ctx, eventName, operation, "", retryOptions, fn)
+}
+
+func (l *Links[LinkT]) Retry(ctx context.Context, eventName azlog.Event, operation string, partitionID string, retryOptions exported.RetryOptions, fn func(ctx context.Context, lwid LinkWithID[LinkT]) error) error {
+	return l.lr.Retry(ctx, eventName, operation, partitionID, retryOptions, fn)
+}
+
+func (l *Links[LinkT]) GetLink(ctx context.Context, partitionID string) (LinkWithID[LinkT], error) {
+	if err := l.checkOpen(); err != nil {
+		return nil, err
+	}
+
+	l.linksMu.RLock()
+	current := l.links[partitionID]
l.links[partitionID] + l.linksMu.RUnlock() + + if current != nil { + return current, nil + } + + // no existing link, let's create a new one within the write lock. + l.linksMu.Lock() + defer l.linksMu.Unlock() + + // check again now that we have the write lock + current = l.links[partitionID] + + if current == nil { + ls, err := l.newLinkState(ctx, partitionID) + + if err != nil { + return nil, err + } + + l.links[partitionID] = ls + current = ls + } + + return current, nil +} + +func (l *Links[LinkT]) GetManagementLink(ctx context.Context) (LinkWithID[amqpwrap.RPCLink], error) { + if err := l.checkOpen(); err != nil { + return nil, err + } + + l.managementLinkMu.Lock() + defer l.managementLinkMu.Unlock() + + if l.managementLink == nil { + ls, err := l.newManagementLinkState(ctx) + + if err != nil { + return nil, err + } + + l.managementLink = ls + } + + return l.managementLink, nil +} + +func (l *Links[LinkT]) newLinkState(ctx context.Context, partitionID string) (*linkState[LinkT], error) { + azlog.Writef(exported.EventConn, "Creating link for partition ID '%s'", partitionID) + + ls := &linkState[LinkT]{ + partitionID: partitionID, + } + + cancelAuth, _, err := l.ns.NegotiateClaim(ctx, l.entityPathFn(partitionID)) + + if err != nil { + azlog.Writef(exported.EventConn, "(%s): Failed to negotiate claim for partition ID '%s': %s", ls.String(), partitionID, err) + return nil, err + } + + ls.cancelAuth = cancelAuth + + session, connID, err := l.ns.NewAMQPSession(ctx) + + if err != nil { + azlog.Writef(exported.EventConn, "(%s): Failed to create AMQP session for partition ID '%s': %s", ls.String(), partitionID, err) + _ = ls.Close(ctx) + return nil, err + } + + ls.session = session + ls.connID = connID + + tmpLink, err := l.newLinkFn(ctx, session, l.entityPathFn(partitionID), partitionID) + + if err != nil { + azlog.Writef(exported.EventConn, "(%s): Failed to create link for partition ID '%s': %s", ls.String(), partitionID, err) + _ = ls.Close(ctx) + return nil, err + } + + ls.link = &tmpLink + + azlog.Writef(exported.EventConn, "(%s): Successfully created link for partition ID '%s'", ls.String(), partitionID) + return ls, nil +} + +func (l *Links[LinkT]) newManagementLinkState(ctx context.Context) (*linkState[amqpwrap.RPCLink], error) { + ls := &linkState[amqpwrap.RPCLink]{} + + cancelAuth, _, err := l.ns.NegotiateClaim(ctx, l.managementPath) + + if err != nil { + return nil, err + } + + ls.cancelAuth = cancelAuth + + tmpRPCLink, connID, err := l.ns.NewRPCLink(ctx, "$management") + + if err != nil { + _ = ls.Close(ctx) + return nil, err + } + + ls.connID = connID + ls.link = &tmpRPCLink + + return ls, nil +} + +func (l *Links[LinkT]) Close(ctx context.Context) error { + return l.closeLinks(ctx, true) +} + +func (l *Links[LinkT]) closeLinks(ctx context.Context, permanent bool) error { + cancelled := false + + // clear out the management link + func() { + l.managementLinkMu.Lock() + defer l.managementLinkMu.Unlock() + + if l.managementLink == nil { + return + } + + mgmtLink := l.managementLink + l.managementLink = nil + + if err := mgmtLink.Close(ctx); err != nil { + azlog.Writef(exported.EventConn, "Error while cleaning up management link while doing connection recovery: %s", err.Error()) + + if IsCancelError(err) { + cancelled = true + } + } + }() + + l.linksMu.Lock() + defer l.linksMu.Unlock() + + tmpLinks := l.links + l.links = nil + + for partitionID, link := range tmpLinks { + if err := link.Close(ctx); err != nil { +
azlog.Writef(exported.EventConn, "Error while cleaning up link for partition ID '%s' while doing connection recovery: %s", partitionID, err.Error()) + + if IsCancelError(err) { + cancelled = true + } + } + } + + if !permanent { + l.links = map[string]*linkState[LinkT]{} + } + + if cancelled { + // this is the only kind of error I'd consider usable from Close() - it'll indicate + // that some of the links haven't been cleanly closed. + return ctx.Err() + } + + return nil +} + +func (l *Links[LinkT]) checkOpen() error { + l.linksMu.RLock() + defer l.linksMu.RUnlock() + + if l.links == nil { + return NewErrNonRetriable("client has been closed by user") + } + + return nil +} + +// closePartitionLinkIfMatch will close the link in the cache if it matches the passed in linkName. +// This is similar to how an etag works - we'll only close it if you are working with the latest link - +// if not, it's a no-op since somebody else has already 'saved' (recovered) before you. +// +// Note that the only error that can be returned here will come from go-amqp. Cleanup of _our_ internal state +// will always happen, if needed. +func (l *Links[LinkT]) closePartitionLinkIfMatch(ctx context.Context, partitionID string, linkName string) error { + l.linksMu.RLock() + current, exists := l.links[partitionID] + l.linksMu.RUnlock() + + if !exists || + current.Link().LinkName() != linkName { // we've already created a new link, their link was stale. + return nil + } + + l.linksMu.Lock() + defer l.linksMu.Unlock() + + current, exists = l.links[partitionID] + + if !exists || + current.Link().LinkName() != linkName { // we've already created a new link, their link was stale. + return nil + } + + delete(l.links, partitionID) + return current.Close(ctx) +} + +func (l *Links[LinkT]) closeManagementLinkIfMatch(ctx context.Context, linkName string) error { + l.managementLinkMu.Lock() + defer l.managementLinkMu.Unlock() + + if l.managementLink != nil && l.managementLink.Link().LinkName() == linkName { + err := l.managementLink.Close(ctx) + l.managementLink = nil + return err + } + + return nil +} + +type linkState[LinkT AMQPLink] struct { + // connID is an arbitrary (but unique) integer that represents the + // current connection. This comes back from the Namespace, anytime + // it hands back a connection. + connID uint64 + + // link will be either an [amqpwrap.AMQPSenderCloser], [amqpwrap.AMQPReceiverCloser] or [amqpwrap.RPCLink] + link *LinkT + + // partitionID, if available. + partitionID string + + // cancelAuth cancels the backround claim negotation for this link. + cancelAuth func() + + // optional session, if we created one for this + // link. + session amqpwrap.AMQPSession +} + +// String returns a string that can be used for logging, of the format: +// (c:,l:<5 characters of link id>) +// +// It can also handle nil and partial initialization. +func (ls *linkState[LinkT]) String() string { + if ls == nil { + return "none" + } + + linkName := "" + + if ls.link != nil { + linkName = ls.Link().LinkName() + } + + return formatLogPrefix(ls.connID, linkName, ls.partitionID) +} + +// Close cancels the background authentication loop for this link and +// then closes the AMQP links. +// NOTE: this avoids any issues where closing fails on the broker-side or +// locally and we leak a goroutine. 
+func (ls *linkState[LinkT]) Close(ctx context.Context) error { + if ls.cancelAuth != nil { + ls.cancelAuth() + } + + if ls.link != nil { + return ls.Link().Close(ctx) + } + + return nil +} + +func (ls *linkState[LinkT]) PartitionID() string { + return ls.partitionID +} + +func (ls *linkState[LinkT]) ConnID() uint64 { + return ls.connID +} + +func (ls *linkState[LinkT]) Link() LinkT { + return *ls.link +} + +// LinkWithID is a readonly interface over the top of a linkState. +type LinkWithID[LinkT AMQPLink] interface { + ConnID() uint64 + Link() LinkT + PartitionID() string + Close(ctx context.Context) error + String() string +} + +func formatLogPrefix(connID uint64, linkName, partitionID string) string { + return fmt.Sprintf("c:%d,l:%.5s,p:%s", connID, linkName, partitionID) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links_recover.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links_recover.go new file mode 100644 index 00000000000..b1da12ccd8f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/links_recover.go @@ -0,0 +1,155 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "context" + "errors" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils" +) + +type LinkRetrier[LinkT AMQPLink] struct { + GetLink func(ctx context.Context, partitionID string) (LinkWithID[LinkT], error) + CloseLink func(ctx context.Context, partitionID string, linkName string) error + NSRecover func(ctx context.Context, connID uint64) error +} + +type RetryCallback[LinkT AMQPLink] func(ctx context.Context, lwid LinkWithID[LinkT]) error + +// Retry runs the fn argument in a loop, respecting retry counts. +// If connection/link failures occur it also takes care of running recovery logic +// to bring them back, or return an appropriate error if retries are exhausted. +func (l LinkRetrier[LinkT]) Retry(ctx context.Context, + eventName azlog.Event, + operation string, + partitionID string, + retryOptions exported.RetryOptions, + fn RetryCallback[LinkT]) error { + didQuickRetry := false + + isFatalErrorFunc := func(err error) bool { + return GetRecoveryKind(err) == RecoveryKindFatal + } + + currentPrefix := "" + + prefix := func() string { + return currentPrefix + } + + return utils.Retry(ctx, eventName, prefix, retryOptions, func(ctx context.Context, args *utils.RetryFnArgs) error { + if err := l.RecoverIfNeeded(ctx, args.LastErr); err != nil { + return err + } + + linkWithID, err := l.GetLink(ctx, partitionID) + + if err != nil { + return err + } + + currentPrefix = linkWithID.String() + + if err := fn(ctx, linkWithID); err != nil { + if args.I == 0 && !didQuickRetry && IsQuickRecoveryError(err) { + // go-amqp will asynchronously handle detaches. This means errors that you get + // back from Send(), for instance, can actually be from much earlier in time + // depending on the last time you called into Send(). + // + // This means we'll sometimes do an unneeded sleep after a failed retry when + // it would have just immediately worked. To counteract that we'll do a one-time + // quick attempt to recreate link immediately if we see a detach error. 
This might + // waste a bit of time attempting to do the creation, but since it's just link creation + // it should be fairly fast. + // + // So when we've received a detach is: + // 0th attempt + // extra immediate 0th attempt (if last error was detach) + // (actual retries) + // + // Whereas normally you'd do (for non-detach errors): + // 0th attempt + // (actual retries) + azlog.Writef(exported.EventConn, "(%s, %s) Link was previously detached. Attempting quick reconnect to recover from error: %s", linkWithID.String(), operation, err.Error()) + didQuickRetry = true + args.ResetAttempts() + } + + return err + } + + return nil + }, isFatalErrorFunc) +} + +func (l LinkRetrier[LinkT]) RecoverIfNeeded(ctx context.Context, err error) error { + rk := GetRecoveryKind(err) + + switch rk { + case RecoveryKindNone: + return nil + case RecoveryKindLink: + var awErr amqpwrap.Error + + if !errors.As(err, &awErr) { + azlog.Writef(exported.EventConn, "RecoveryKindLink, but not an amqpwrap.Error: %T,%v", err, err) + return nil + } + + if err := l.CloseLink(ctx, awErr.PartitionID, awErr.LinkName); err != nil { + azlog.Writef(exported.EventConn, "(%s) Error when cleaning up old link for link recovery: %s", formatLogPrefix(awErr.ConnID, awErr.LinkName, awErr.PartitionID), err) + return err + } + + return nil + case RecoveryKindConn: + var awErr amqpwrap.Error + + if !errors.As(err, &awErr) { + azlog.Writef(exported.EventConn, "RecoveryKindConn, but not an amqpwrap.Error: %T,%v", err, err) + return nil + } + + // We only close _this_ partition's link. Other partitions will also get an error, and will recover. + // We used to close _all_ the links, but no longer do that since it's possible (when we do receiver + // redirect) to have more than one active connection at a time which means not all links would be + // affected when a single connection goes down. + if err := l.CloseLink(ctx, awErr.PartitionID, awErr.LinkName); err != nil { + azlog.Writef(exported.EventConn, "(%s) Error when cleaning up old link: %s", formatLogPrefix(awErr.ConnID, awErr.LinkName, awErr.PartitionID), err) + + // NOTE: this is best effort - it's probable the connection is dead anyways so we'll log + // but ignore the error for recovery purposes. + } + + // There are two possibilities here: + // + // 1. (stale) The caller got this error but the `lwid` they're passing us is 'stale' - ie, ' + // the connection the error happened on doesn't exist anymore (we recovered already) or + // the link itself is no longer active in our cache. + // + // 2. (current) The caller got this error and is the current link and/or connection, so we're going to + // need to recycle the connection (possibly) and links. + // + // For #1, we basically don't need to do anything. Recover(old-connection-id) will be a no-op + // and the closePartitionLinkIfMatch() will no-op as well since the link they passed us will + // not match the current link. + // + // For #2, we may recreate the connection. It's possible we won't if the connection itself + // has already been recovered by another goroutine. 
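+ // + // Concretely, the order for #2 is: CloseLink (already called above), then + // NSRecover below; the surrounding retry loop then recreates the link on + // demand via GetLink on its next attempt.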
+ err := l.NSRecover(ctx, awErr.ConnID) + + if err != nil { + azlog.Writef(exported.EventConn, "(%s) Failure recovering connection for link: %s", formatLogPrefix(awErr.ConnID, awErr.LinkName, awErr.PartitionID), err) + return err + } + + return nil + default: + return err + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace.go new file mode 100644 index 00000000000..dd19b713a7a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace.go @@ -0,0 +1,512 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "runtime" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/telemetry" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils" + "github.com/Azure/go-amqp" +) + +var rootUserAgent = telemetry.Format("azeventhubs", Version) + +type ( + // Namespace is an abstraction over an amqp.Client, allowing us to hold onto a single + // instance of a connection per client.. + Namespace struct { + // NOTE: values need to be 64-bit aligned. Simplest way to make sure this happens + // is just to make it the first value in the struct + // See: + // Godoc: https://pkg.go.dev/sync/atomic#pkg-note-BUG + // PR: https://github.com/Azure/azure-sdk-for-go/pull/16847 + connID uint64 + + FQDN string + TokenProvider *sbauth.TokenProvider + tlsConfig *tls.Config + userAgent string + + newWebSocketConn func(ctx context.Context, args exported.WebSocketConnParams) (net.Conn, error) + + // NOTE: exported only so it can be checked in a test + RetryOptions exported.RetryOptions + + clientMu sync.RWMutex + client amqpwrap.AMQPClient + negotiateClaimMu sync.Mutex + // indicates that the client was closed permanently, and not just + // for recovery. + closedPermanently bool + + // newClientFn exists so we can stub out newClient for unit tests. + newClientFn func(ctx context.Context, connID uint64) (amqpwrap.AMQPClient, error) + } + + // NamespaceOption provides structure for configuring a new Event Hub namespace + NamespaceOption func(h *Namespace) error +) + +// NamespaceWithNewAMQPLinks is the Namespace surface for consumers of AMQPLinks. +type NamespaceWithNewAMQPLinks interface { + Check() error +} + +// NamespaceForAMQPLinks is the Namespace surface needed for the internals of AMQPLinks. +type NamespaceForAMQPLinks interface { + NegotiateClaim(ctx context.Context, entityPath string) (context.CancelFunc, <-chan struct{}, error) + NewAMQPSession(ctx context.Context) (amqpwrap.AMQPSession, uint64, error) + NewRPCLink(ctx context.Context, managementPath string) (amqpwrap.RPCLink, uint64, error) + GetEntityAudience(entityPath string) string + + // Recover destroys the currently held AMQP connection and recreates it, if needed. 
+ // + // NOTE: cancelling the context only cancels the initialization of a new AMQP + // connection - the previous connection is always closed. + Recover(ctx context.Context, clientRevision uint64) error + + Close(ctx context.Context, permanently bool) error +} + +// NamespaceWithConnectionString configures a namespace with the information provided in an Event Hub connection string +func NamespaceWithConnectionString(connStr string) NamespaceOption { + return func(ns *Namespace) error { + props, err := exported.ParseConnectionString(connStr) + if err != nil { + return err + } + + ns.FQDN = props.FullyQualifiedNamespace + + provider, err := sbauth.NewTokenProviderWithConnectionString(props) + if err != nil { + return err + } + + ns.TokenProvider = provider + return nil + } +} + +// NamespaceWithTLSConfig appends to the TLS config. +func NamespaceWithTLSConfig(tlsConfig *tls.Config) NamespaceOption { + return func(ns *Namespace) error { + ns.tlsConfig = tlsConfig + return nil + } +} + +// NamespaceWithUserAgent appends to the root user-agent value. +func NamespaceWithUserAgent(userAgent string) NamespaceOption { + return func(ns *Namespace) error { + ns.userAgent = userAgent + return nil + } +} + +// NamespaceWithWebSocket configures the namespace and all entities to use wss:// rather than amqps:// +func NamespaceWithWebSocket(newWebSocketConn func(ctx context.Context, args exported.WebSocketConnParams) (net.Conn, error)) NamespaceOption { + return func(ns *Namespace) error { + ns.newWebSocketConn = newWebSocketConn + return nil + } +} + +// NamespaceWithTokenCredential sets the token provider on the namespace. +// fullyQualifiedNamespace is the Event Hub namespace name (ex: myservicebus.servicebus.windows.net) +func NamespaceWithTokenCredential(fullyQualifiedNamespace string, tokenCredential azcore.TokenCredential) NamespaceOption { + return func(ns *Namespace) error { + ns.TokenProvider = sbauth.NewTokenProvider(tokenCredential) + ns.FQDN = fullyQualifiedNamespace + return nil + } +} + +func NamespaceWithRetryOptions(retryOptions exported.RetryOptions) NamespaceOption { + return func(ns *Namespace) error { + ns.RetryOptions = retryOptions + return nil + } +} + +// NewNamespace creates a new namespace configured through NamespaceOption(s) +func NewNamespace(opts ...NamespaceOption) (*Namespace, error) { + ns := &Namespace{} + + ns.newClientFn = ns.newClientImpl + + for _, opt := range opts { + err := opt(ns) + if err != nil { + return nil, err + } + } + + return ns, nil +} + +func (ns *Namespace) newClientImpl(ctx context.Context, connID uint64) (amqpwrap.AMQPClient, error) { + connOptions := amqp.ConnOptions{ + SASLType: amqp.SASLTypeAnonymous(), + MaxSessions: 65535, + Properties: map[string]any{ + "product": "MSGolangClient", + "version": Version, + "platform": runtime.GOOS, + "framework": runtime.Version(), + "user-agent": ns.getUserAgent(), + }, + } + + if ns.tlsConfig != nil { + connOptions.TLSConfig = ns.tlsConfig + } + + if ns.newWebSocketConn != nil { + nConn, err := ns.newWebSocketConn(ctx, exported.WebSocketConnParams{ + Host: ns.getWSSHostURI() + "$servicebus/websocket", + }) + + if err != nil { + return nil, err + } + + connOptions.HostName = ns.FQDN + client, err := amqp.NewConn(ctx, nConn, &connOptions) + return &amqpwrap.AMQPClientWrapper{Inner: client, ConnID: connID}, err + } + + client, err := amqp.Dial(ctx, ns.getAMQPHostURI(), &connOptions) + return &amqpwrap.AMQPClientWrapper{Inner: client, ConnID: connID}, err +} + +// NewAMQPSession creates a new AMQP session with the
internally cached *amqp.Client. +// Returns a closeable AMQP session and the current client revision. +func (ns *Namespace) NewAMQPSession(ctx context.Context) (amqpwrap.AMQPSession, uint64, error) { + client, clientRevision, err := ns.GetAMQPClientImpl(ctx) + + if err != nil { + return nil, 0, err + } + + session, err := client.NewSession(ctx, nil) + + if err != nil { + return nil, 0, err + } + + return session, clientRevision, err +} + +// Close closes the current cached client. +func (ns *Namespace) Close(ctx context.Context, permanently bool) error { + ns.clientMu.Lock() + defer ns.clientMu.Unlock() + + if permanently { + ns.closedPermanently = true + } + + if ns.client != nil { + err := ns.client.Close() + ns.client = nil + + if err != nil { + log.Writef(exported.EventConn, "Failed when closing AMQP connection: %s", err) + } + } + + return nil +} + +// Check returns an error if the namespace cannot be used (ie, closed permanently), or nil otherwise. +func (ns *Namespace) Check() error { + ns.clientMu.RLock() + defer ns.clientMu.RUnlock() + + if ns.closedPermanently { + return ErrClientClosed + } + + return nil +} + +var ErrClientClosed = NewErrNonRetriable("client has been closed by user") + +// Recover destroys the currently held AMQP connection and recreates it, if needed. +// +// NOTE: cancelling the context only cancels the initialization of a new AMQP +// connection - the previous connection is always closed. +func (ns *Namespace) Recover(ctx context.Context, theirConnID uint64) error { + if err := ns.Check(); err != nil { + return err + } + + ns.clientMu.Lock() + defer ns.clientMu.Unlock() + + if ns.closedPermanently { + return ErrClientClosed + } + + if ns.connID != theirConnID { + log.Writef(exported.EventConn, "Skipping connection recovery, already recovered: %d vs %d. Links will still be recovered.", ns.connID, theirConnID) + return nil + } + + if ns.client != nil { + oldClient := ns.client + ns.client = nil + + if err := oldClient.Close(); err != nil { + // the error on close isn't critical, we don't need to exit or + // return it. + log.Writef(exported.EventConn, "Error closing old client: %s", err.Error()) + } + } + + log.Writef(exported.EventConn, "Creating a new client (rev:%d)", ns.connID) + + if _, _, err := ns.updateClientWithoutLock(ctx); err != nil { + return err + } + + return nil +} + +// negotiateClaimFn matches the signature for NegotiateClaim, and is used when we want to stub things out for tests. +type negotiateClaimFn func( + ctx context.Context, audience string, conn amqpwrap.AMQPClient, provider auth.TokenProvider) error + +// negotiateClaim performs initial authentication and starts periodic refresh of credentials. +// the returned func is to cancel() the refresh goroutine. +func (ns *Namespace) NegotiateClaim(ctx context.Context, entityPath string) (context.CancelFunc, <-chan struct{}, error) { + return ns.startNegotiateClaimRenewer(ctx, + entityPath, + NegotiateClaim, + nextClaimRefreshDuration) +} + +// startNegotiateClaimRenewer does an initial claim request and then starts a goroutine that +// continues to automatically refresh in the background. +// Returns a func() that can be used to cancel the background renewal, a channel that will be closed +// when the background renewal stops or an error. 
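+// +// Callers normally go through NegotiateClaim; a rough usage sketch: +// +// cancelAuth, stoppedCh, err := ns.NegotiateClaim(ctx, entityPath) +// if err != nil { ... } +// defer cancelAuth() // stops the refresh goroutine; stoppedCh closes once it exits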
+func (ns *Namespace) startNegotiateClaimRenewer(ctx context.Context, + entityPath string, + cbsNegotiateClaim negotiateClaimFn, + nextClaimRefreshDurationFn func(expirationTime time.Time, currentTime time.Time) time.Duration) (func(), <-chan struct{}, error) { + audience := ns.GetEntityAudience(entityPath) + + refreshClaim := func(ctx context.Context) (time.Time, error) { + log.Writef(exported.EventAuth, "(%s) refreshing claim", entityPath) + + amqpClient, clientRevision, err := ns.GetAMQPClientImpl(ctx) + + if err != nil { + return time.Time{}, err + } + + token, expiration, err := ns.TokenProvider.GetTokenAsTokenProvider(audience) + + if err != nil { + log.Writef(exported.EventAuth, "(%s) negotiate claim, failed getting token: %s", entityPath, err.Error()) + return time.Time{}, err + } + + log.Writef(exported.EventAuth, "(%s) negotiate claim, token expires on %s", entityPath, expiration.Format(time.RFC3339)) + + // You're not allowed to have multiple $cbs links open in a single connection. + // The current cbs.NegotiateClaim implementation automatically creates and shuts + // down its own link so we have to guard against that here. + ns.negotiateClaimMu.Lock() + err = cbsNegotiateClaim(ctx, audience, amqpClient, token) + ns.negotiateClaimMu.Unlock() + + if err != nil { + // Note we only handle connection recovery here since (currently) + // the negotiateClaim code creates its own link each time. + if GetRecoveryKind(err) == RecoveryKindConn { + if err := ns.Recover(ctx, clientRevision); err != nil { + log.Writef(exported.EventAuth, "(%s) negotiate claim, failed in connection recovery: %s", entityPath, err) + } + } + + log.Writef(exported.EventAuth, "(%s) negotiate claim, failed: %s", entityPath, err.Error()) + return time.Time{}, err + } + + return expiration, nil + } + + expiresOn, err := refreshClaim(ctx) + + if err != nil { + return nil, nil, err + } + + // start the periodic refresh of credentials + refreshCtx, cancelRefreshCtx := context.WithCancel(context.Background()) + refreshStoppedCh := make(chan struct{}) + + // connection strings with embedded SAS tokens will return a zero expiration time since they can't be renewed. + if expiresOn.IsZero() { + log.Writef(exported.EventAuth, "Token does not have an expiration date, no background renewal needed.") + + // cancel everything related to the claims refresh loop.
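+ // Cancelling and closing here keeps the contract uniform for callers: the + // returned func stays safe to call and refreshStoppedCh is already closed, + // exactly as if a renewal loop had run and finished.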
+ cancelRefreshCtx() + close(refreshStoppedCh) + + return func() {}, refreshStoppedCh, nil + } + + go func() { + defer cancelRefreshCtx() + defer close(refreshStoppedCh) + + TokenRefreshLoop: + for { + nextClaimAt := nextClaimRefreshDurationFn(expiresOn, time.Now()) + + log.Writef(exported.EventAuth, "(%s) next refresh in %s", entityPath, nextClaimAt) + + select { + case <-refreshCtx.Done(): + return + case <-time.After(nextClaimAt): + for { + err := utils.Retry(refreshCtx, exported.EventAuth, func() string { return "NegotiateClaimRefresh" }, ns.RetryOptions, func(ctx context.Context, args *utils.RetryFnArgs) error { + tmpExpiresOn, err := refreshClaim(ctx) + + if err != nil { + return err + } + + expiresOn = tmpExpiresOn + return nil + }, IsFatalEHError) + + if err == nil { + break + } + + if GetRecoveryKind(err) == RecoveryKindFatal { + log.Writef(exported.EventAuth, "[%s] fatal error, stopping token refresh loop: %s", entityPath, err.Error()) + break TokenRefreshLoop + } + } + } + } + }() + + return func() { + cancelRefreshCtx() + <-refreshStoppedCh + }, refreshStoppedCh, nil +} + +func (ns *Namespace) GetAMQPClientImpl(ctx context.Context) (amqpwrap.AMQPClient, uint64, error) { + if err := ns.Check(); err != nil { + return nil, 0, err + } + + ns.clientMu.Lock() + defer ns.clientMu.Unlock() + + if ns.closedPermanently { + return nil, 0, ErrClientClosed + } + + return ns.updateClientWithoutLock(ctx) +} + +// updateClientWithoutLock takes care of initializing a client (if needed) +// and returns the initialized client and its connection ID, or an error. +func (ns *Namespace) updateClientWithoutLock(ctx context.Context) (amqpwrap.AMQPClient, uint64, error) { + if ns.client != nil { + return ns.client, ns.connID, nil + } + + connStart := time.Now() + log.Writef(exported.EventConn, "Creating new client, current rev: %d", ns.connID) + + newConnID := ns.connID + 1 + tempClient, err := ns.newClientFn(ctx, newConnID) + + if err != nil { + return nil, 0, err + } + + ns.connID = newConnID + ns.client = tempClient + log.Writef(exported.EventConn, "Client created, new rev: %d, took %dms", ns.connID, time.Since(connStart)/time.Millisecond) + + return ns.client, ns.connID, err +} + +func (ns *Namespace) getWSSHostURI() string { + return fmt.Sprintf("wss://%s/", ns.FQDN) +} + +func (ns *Namespace) getAMQPHostURI() string { + if ns.TokenProvider.InsecureDisableTLS { + return fmt.Sprintf("amqp://%s/", ns.FQDN) + } else { + return fmt.Sprintf("amqps://%s/", ns.FQDN) + } +} + +func (ns *Namespace) GetHTTPSHostURI() string { + return fmt.Sprintf("https://%s/", ns.FQDN) +} + +func (ns *Namespace) GetEntityAudience(entityPath string) string { + return ns.getAMQPHostURI() + entityPath +} + +func (ns *Namespace) getUserAgent() string { + userAgent := rootUserAgent + if ns.userAgent != "" { + userAgent = fmt.Sprintf("%s %s", ns.userAgent, userAgent) + } + return userAgent +} + +// nextClaimRefreshDuration figures out the proper interval for the next authorization +// refresh. +// +// It applies a few real world adjustments: +// - We assume the expiration time is 10 minutes ahead of when it actually is, to adjust for clock drift.
+// - We don't let the refresh interval fall below 2 minutes +// - We don't let the refresh interval go above 49 days +// +// This logic is from here: +// https://github.com/Azure/azure-sdk-for-net/blob/bfd3109d0f9afa763131731d78a31e39c81101b3/sdk/servicebus/Azure.Messaging.ServiceBus/src/Amqp/AmqpConnectionScope.cs#L998 +func nextClaimRefreshDuration(expirationTime time.Time, currentTime time.Time) time.Duration { + const min = 2 * time.Minute + const max = 49 * 24 * time.Hour + const clockDrift = 10 * time.Minute + + var refreshDuration = expirationTime.Sub(currentTime) - clockDrift + + if refreshDuration < min { + return min + } else if refreshDuration > max { + return max + } + + return refreshDuration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace_eh.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace_eh.go new file mode 100644 index 00000000000..3d827c40671 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/namespace_eh.go @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package internal + +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" +) + +func (l *rpcLink) LinkName() string { + return l.sender.LinkName() +} + +func (ns *Namespace) NewRPCLink(ctx context.Context, managementPath string) (amqpwrap.RPCLink, uint64, error) { + client, connID, err := ns.GetAMQPClientImpl(ctx) + + if err != nil { + return nil, 0, err + } + + rpcLink, err := NewRPCLink(ctx, RPCLinkArgs{ + Client: client, + Address: managementPath, + LogEvent: exported.EventProducer, + }) + + if err != nil { + return nil, 0, err + } + + return rpcLink, connID, nil +} + +func (ns *Namespace) GetTokenForEntity(eventHub string) (*auth.Token, error) { + audience := ns.GetEntityAudience(eventHub) + return ns.TokenProvider.GetToken(audience) +} + +type NamespaceForManagementOps interface { + NamespaceForAMQPLinks + GetTokenForEntity(eventHub string) (*auth.Token, error) +} + +// TODO: might just consolidate. +type NamespaceForProducerOrConsumer = NamespaceForManagementOps diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/rpc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/rpc.go new file mode 100644 index 00000000000..056e55251a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/rpc.go @@ -0,0 +1,444 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package internal + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/go-amqp" +) + +const ( + replyPostfix = "-reply-to-" + statusCodeKey = "status-code" + descriptionKey = "status-description" + defaultReceiverCredits = 1000 +) + +var RPCLinkClosedErr = errors.New("rpc link closed") + +type ( + // rpcLink is the bidirectional communication structure used for CBS negotiation + rpcLink struct { + session amqpwrap.AMQPSession + receiver amqpwrap.AMQPReceiverCloser // *amqp.Receiver + sender amqpwrap.AMQPSenderCloser // *amqp.Sender + + clientAddress string + sessionID *string + id string + + responseMu sync.Mutex + responseRouterClosed chan struct{} + + responseMap map[string]chan rpcResponse + rpcLinkCtx context.Context + rpcLinkCtxCancel context.CancelFunc + broadcastErr error // the error that caused the responseMap to be nil'd + + logEvent azlog.Event + + // for unit tests + uuidNewV4 func() (uuid.UUID, error) + } + + // RPCLinkOption provides a way to customize the construction of a Link + RPCLinkOption func(link *rpcLink) error + + rpcResponse struct { + message *amqp.Message + err error + } +) + +// RPCError is an error from an RPCLink. +// RPCLinks are used for communication with the $management and $cbs links. +type RPCError struct { + Resp *amqpwrap.RPCResponse + Message string +} + +// Error is a string representation of the error. +func (e RPCError) Error() string { + return e.Message +} + +// RPCCode is the code that comes back in the rpc response. This code is intended +// for programs to react to programmatically.
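+// For example, a caller can branch on it (404 is an illustrative value): +// +// var rpcErr RPCError +// if errors.As(err, &rpcErr) && rpcErr.RPCCode() == 404 { +// // entity not found +// }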
+func (e RPCError) RPCCode() int { + return e.Resp.Code +} + +type RPCLinkArgs struct { + Client amqpwrap.AMQPClient + Address string + LogEvent azlog.Event +} + +// NewRPCLink will build a new request response link +func NewRPCLink(ctx context.Context, args RPCLinkArgs) (amqpwrap.RPCLink, error) { + session, err := args.Client.NewSession(ctx, nil) + + if err != nil { + return nil, err + } + + linkID, err := uuid.New() + if err != nil { + _ = session.Close(ctx) + return nil, err + } + + id := linkID.String() + + link := &rpcLink{ + session: session, + clientAddress: strings.Replace("$", "", args.Address, -1) + replyPostfix + id, + id: id, + + uuidNewV4: uuid.New, + responseMap: map[string]chan rpcResponse{}, + responseRouterClosed: make(chan struct{}), + logEvent: args.LogEvent, + } + + sender, err := session.NewSender( + ctx, + args.Address, + "", + nil, + ) + if err != nil { + _ = session.Close(ctx) + return nil, err + } + + receiverOpts := &amqp.ReceiverOptions{ + TargetAddress: link.clientAddress, + Credit: defaultReceiverCredits, + } + + if link.sessionID != nil { + const name = "com.microsoft:session-filter" + const code = uint64(0x00000137000000C) + if link.sessionID == nil { + receiverOpts.Filters = append(receiverOpts.Filters, amqp.NewLinkFilter(name, code, nil)) + } else { + receiverOpts.Filters = append(receiverOpts.Filters, amqp.NewLinkFilter(name, code, link.sessionID)) + } + } + + receiver, err := session.NewReceiver(ctx, args.Address, "", receiverOpts) + if err != nil { + _ = session.Close(ctx) + return nil, err + } + + link.sender = sender + link.receiver = receiver + link.rpcLinkCtx, link.rpcLinkCtxCancel = context.WithCancel(context.Background()) + + go link.responseRouter() + + return link, nil +} + +const responseRouterShutdownMessage = "Response router has shut down" + +// responseRouter is responsible for taking any messages received on the 'response' +// link and forwarding it to the proper channel. The channel is being select'd by the +// original `RPC` call. +func (l *rpcLink) responseRouter() { + defer azlog.Writef(l.logEvent, responseRouterShutdownMessage) + defer close(l.responseRouterClosed) + + for { + res, err := l.receiver.Receive(l.rpcLinkCtx, nil) + + if err != nil { + // if the link or connection has a malfunction that would require it to restart then + // we need to bail out, broadcasting to all affected callers/consumers. + if GetRecoveryKind(err) != RecoveryKindNone { + if IsCancelError(err) { + err = RPCLinkClosedErr + } else { + azlog.Writef(l.logEvent, "Error in RPCLink, stopping response router: %s", err.Error()) + } + + l.broadcastError(err) + break + } + + azlog.Writef(l.logEvent, "Non-fatal error in RPCLink, starting to receive again: %s", err.Error()) + continue + } + + // I don't believe this should happen. The JS version of this same code + // ignores errors as well since responses should always be correlated + // to actual send requests. So this is just here for completeness. 
+ if res == nil { + azlog.Writef(l.logEvent, "RPCLink received no error, but also got no response") + continue + } + + autogenMessageId, ok := res.Properties.CorrelationID.(string) + + if !ok { + azlog.Writef(l.logEvent, "RPCLink message received without a CorrelationID %v", res) + continue + } + + ch := l.deleteChannelFromMap(autogenMessageId) + + if ch == nil { + azlog.Writef(l.logEvent, "RPCLink had no response channel for correlation ID %v", autogenMessageId) + continue + } + + ch <- rpcResponse{message: res, err: err} + } +} + +func (l *rpcLink) RPC(ctx context.Context, msg *amqp.Message) (*amqpwrap.RPCResponse, error) { + resp, err := l.internalRPC(ctx, msg) + + if err != nil { + return nil, amqpwrap.WrapError(err, l.ConnID(), l.LinkName(), "") + } + + return resp, nil +} + +// RPC sends a request and waits on a response for that request +func (l *rpcLink) internalRPC(ctx context.Context, msg *amqp.Message) (*amqpwrap.RPCResponse, error) { + copiedMessage, messageID, err := addMessageID(msg, l.uuidNewV4) + + if err != nil { + return nil, err + } + + // use the copiedMessage from this point + msg = copiedMessage + + const altStatusCodeKey, altDescriptionKey = "statusCode", "statusDescription" + + msg.Properties.ReplyTo = &l.clientAddress + + if msg.ApplicationProperties == nil { + msg.ApplicationProperties = make(map[string]any) + } + + if _, ok := msg.ApplicationProperties["server-timeout"]; !ok { + if deadline, ok := ctx.Deadline(); ok { + msg.ApplicationProperties["server-timeout"] = uint(time.Until(deadline) / time.Millisecond) + } + } + + responseCh := l.addChannelToMap(messageID) + + if responseCh == nil { + return nil, l.broadcastErr + } + + err = l.sender.Send(ctx, msg, nil) + + if err != nil { + l.deleteChannelFromMap(messageID) + return nil, fmt.Errorf("failed to send message with ID %s: %w", messageID, err) + } + + var res *amqp.Message + + select { + case <-ctx.Done(): + l.deleteChannelFromMap(messageID) + res, err = nil, ctx.Err() + case resp := <-responseCh: + // this will get triggered by the loop in 'startReceiverRouter' when it receives + // a message with our autoGenMessageID set in the correlation_id property. 
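+ // The round trip, roughly: internalRPC stamps the request with a fresh + // MessageID and registers responseMap[messageID]; the service echoes that + // ID back as the CorrelationID, and responseRouter delivers the reply here + // (removing the map entry as it does).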
+ res, err = resp.message, resp.err + } + + if err != nil { + return nil, err + } + + var statusCode int + statusCodeCandidates := []string{statusCodeKey, altStatusCodeKey} + for i := range statusCodeCandidates { + if rawStatusCode, ok := res.ApplicationProperties[statusCodeCandidates[i]]; ok { + if cast, ok := rawStatusCode.(int32); ok { + statusCode = int(cast) + break + } + + return nil, errors.New("status code was not of expected type int32") + } + } + if statusCode == 0 { + return nil, errors.New("status code was not found on rpc message") + } + + var description string + descriptionCandidates := []string{descriptionKey, altDescriptionKey} + for i := range descriptionCandidates { + if rawDescription, ok := res.ApplicationProperties[descriptionCandidates[i]]; ok { + if description, ok = rawDescription.(string); ok || rawDescription == nil { + break + } else { + return nil, errors.New("status description was not of expected type string") + } + } + } + + response := &amqpwrap.RPCResponse{ + Code: int(statusCode), + Description: description, + Message: res, + } + + if err := l.receiver.AcceptMessage(ctx, res); err != nil { + return response, fmt.Errorf("failed accepting message on rpc link: %w", err) + } + + var rpcErr RPCError + + if asRPCError(response, &rpcErr) { + return nil, rpcErr + } + + return response, err +} + +func (l *rpcLink) ConnID() uint64 { + return l.session.ConnID() +} + +// Close the link receiver, sender and session +func (l *rpcLink) Close(ctx context.Context) error { + l.rpcLinkCtxCancel() + + select { + case <-l.responseRouterClosed: + case <-ctx.Done(): + } + + if l.session != nil { + return l.session.Close(ctx) + } + + return nil +} + +// addChannelToMap adds a channel which will be used by the response router to +// notify when there is a response to the request. +// If l.responseMap is nil (for instance, via broadcastError) this function will +// return nil. +func (l *rpcLink) addChannelToMap(messageID string) chan rpcResponse { + l.responseMu.Lock() + defer l.responseMu.Unlock() + + if l.responseMap == nil { + return nil + } + + responseCh := make(chan rpcResponse, 1) + l.responseMap[messageID] = responseCh + + return responseCh +} + +// deleteChannelFromMap removes the message from our internal map and returns +// a channel that the corresponding RPC() call is waiting on. +// If l.responseMap is nil (for instance, via broadcastError) this function will +// return nil. +func (l *rpcLink) deleteChannelFromMap(messageID string) chan rpcResponse { + l.responseMu.Lock() + defer l.responseMu.Unlock() + + if l.responseMap == nil { + return nil + } + + ch := l.responseMap[messageID] + delete(l.responseMap, messageID) + + return ch +} + +// broadcastError notifies anyone waiting for a response that the link/session/connection +// has closed. +func (l *rpcLink) broadcastError(err error) { + l.responseMu.Lock() + defer l.responseMu.Unlock() + + for _, ch := range l.responseMap { + ch <- rpcResponse{err: err} + } + + l.broadcastErr = err + l.responseMap = nil +} + +// addMessageID generates a unique UUID for the message. When the service +// responds it will fill out the correlation ID property of the response +// with this ID, allowing us to link the request and response together. +// +// NOTE: this function copies 'message', adding in a 'Properties' object +// if it does not already exist.
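+// For example (the UUID here is illustrative), a request with nil Properties +// comes back as a copy with Properties.MessageID == "8f1e..." plus "8f1e..." +// as the returned string; the input message is never mutated.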
+func addMessageID(message *amqp.Message, uuidNewV4 func() (uuid.UUID, error)) (*amqp.Message, string, error) { + uuid, err := uuidNewV4() + + if err != nil { + return nil, "", err + } + + autoGenMessageID := uuid.String() + + // we need to modify the message so we'll make a copy + copiedMessage := *message + + if message.Properties == nil { + copiedMessage.Properties = &amqp.MessageProperties{ + MessageID: autoGenMessageID, + } + } else { + // properties already exist, make a copy and then update + // the message ID + copiedProperties := *message.Properties + copiedProperties.MessageID = autoGenMessageID + + copiedMessage.Properties = &copiedProperties + } + + return &copiedMessage, autoGenMessageID, nil +} + +// asRPCError checks to see if the res is actually a failed request +// (where failed means the status code was non-2xx). If so, +// it returns true and updates the struct pointed to by err. +func asRPCError(res *amqpwrap.RPCResponse, err *RPCError) bool { + if res == nil { + return false + } + + if res.Code >= 200 && res.Code < 300 { + return false + } + + *err = RPCError{ + Message: fmt.Sprintf("rpc: failed, status code %d and description: %s", res.Code, res.Description), + Resp: res, + } + + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas/sas.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas/sas.go new file mode 100644 index 00000000000..0b5854ea277 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas/sas.go @@ -0,0 +1,179 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package sas provides SAS token functionality which implements TokenProvider from package auth for use with Azure +// Event Hubs and Service Bus. + +package sas + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" +) + +type ( + // Signer provides SAS token generation for use in Service Bus and Event Hub + Signer struct { + KeyName string + Key string + + // getNow is stubbable for unit tests and is just an alias for time.Now() + getNow func() time.Time + } + + // TokenProvider is a SAS claims-based security token provider + TokenProvider struct { + // expiryDuration is only used when we're generating SAS tokens. It gets used + // to calculate the expiration timestamp for a token. Pre-computed SAS tokens + // passed in TokenProviderWithSAS() are not affected. + expiryDuration time.Duration + + signer *Signer + + // sas is a precomputed SAS token. This implies that the caller has some other + // method for generating tokens.
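+ // Its shape matches what Signer produces below, e.g.: + // SharedAccessSignature sr=<audience>&sig=<signature>&se=<expiry>&skn=<key name>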
+ sas string + } + + // TokenProviderOption provides configuration options for SAS Token Providers + TokenProviderOption func(*TokenProvider) error +) + +// TokenProviderWithKey configures a SAS TokenProvider to use the given key name and key (secret) for signing +func TokenProviderWithKey(keyName, key string, expiryDuration time.Duration) TokenProviderOption { + return func(provider *TokenProvider) error { + + if expiryDuration == 0 { + expiryDuration = 2 * time.Hour + } + + provider.expiryDuration = expiryDuration + provider.signer = NewSigner(keyName, key) + return nil + } +} + +// TokenProviderWithSAS configures the token provider with a pre-created SharedAccessSignature. +// auth.Token's coming back from this TokenProvider instance will always have '0' as the expiration +// date. +func TokenProviderWithSAS(sas string) TokenProviderOption { + return func(provider *TokenProvider) error { + provider.sas = sas + return nil + } +} + +// NewTokenProvider builds a SAS claims-based security token provider +func NewTokenProvider(opts ...TokenProviderOption) (*TokenProvider, error) { + provider := new(TokenProvider) + + for _, opt := range opts { + err := opt(provider) + if err != nil { + return nil, err + } + } + return provider, nil +} + +// GetToken gets a CBS SAS token +func (t *TokenProvider) GetToken(audience string) (*auth.Token, error) { + if t.sas != "" { + // the expiration date doesn't matter here so we'll just set it 0. + return auth.NewToken(auth.CBSTokenTypeSAS, t.sas, "0"), nil + } + + signature, expiry, err := t.signer.SignWithDuration(audience, t.expiryDuration) + + if err != nil { + return nil, err + } + + return auth.NewToken(auth.CBSTokenTypeSAS, signature, expiry), nil +} + +// NewSigner builds a new SAS signer for use in generating Service Bus and Event Hub SAS tokens +func NewSigner(keyName, key string) *Signer { + return &Signer{ + KeyName: keyName, + Key: key, + + getNow: time.Now, + } +} + +// SignWithDuration signs a given uri for a period of time from now +func (s *Signer) SignWithDuration(uri string, interval time.Duration) (signature, expiry string, err error) { + expiry = signatureExpiry(s.getNow().UTC(), interval) + sig, err := s.SignWithExpiry(uri, expiry) + + if err != nil { + return "", "", err + } + + return sig, expiry, nil +} + +// SignWithExpiry signs a given uri with a given expiry string +func (s *Signer) SignWithExpiry(uri, expiry string) (string, error) { + audience := strings.ToLower(url.QueryEscape(uri)) + sts := stringToSign(audience, expiry) + sig, err := s.signString(sts) + + if err != nil { + return "", err + } + + return fmt.Sprintf("SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s", audience, sig, expiry, s.KeyName), nil +} + +// CreateConnectionStringWithSASUsingExpiry generates a new connection string with +// an embedded SharedAccessSignature and expiration.
+// Ex: Endpoint=sb://<namespace>.servicebus.windows.net;SharedAccessSignature=SharedAccessSignature sr=<namespace>.servicebus.windows.net&sig=<signature>&se=<expiry>&skn=<key name> +func CreateConnectionStringWithSASUsingExpiry(connectionString string, expiry time.Time) (string, error) { + parsed, err := exported.ParseConnectionString(connectionString) + + if err != nil { + return "", err + } + + signer := NewSigner(*parsed.SharedAccessKeyName, *parsed.SharedAccessKey) + + sig, err := signer.SignWithExpiry(parsed.FullyQualifiedNamespace, fmt.Sprintf("%d", expiry.Unix())) + + if err != nil { + return "", err + } + + return fmt.Sprintf("Endpoint=sb://%s;SharedAccessSignature=%s", parsed.FullyQualifiedNamespace, sig), nil +} + +func signatureExpiry(from time.Time, interval time.Duration) string { + t := from.Add(interval).Round(time.Second).Unix() + return strconv.FormatInt(t, 10) +} + +func stringToSign(uri, expiry string) string { + return uri + "\n" + expiry +} + +func (s *Signer) signString(str string) (string, error) { + h := hmac.New(sha256.New, []byte(s.Key)) + _, err := h.Write([]byte(str)) + + if err != nil { + return "", err + } + + encodedSig := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return url.QueryEscape(encodedSig), nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth/token_provider.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth/token_provider.go new file mode 100644 index 00000000000..f44dc22aad0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth/token_provider.go @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sbauth + +import ( + "context" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas" +) + +// TokenProvider handles access tokens and expiration calculation for SAS +// keys (via connection strings) or TokenCredentials from Azure Identity. +type TokenProvider struct { + tokenCred azcore.TokenCredential + sasTokenProvider *sas.TokenProvider + + // InsecureDisableTLS disables TLS. This is only used if the user is connecting to localhost + // and is using an emulator connection string. See [ConnectionStringProperties.Emulator] for + // details. + InsecureDisableTLS bool +} + +// NewTokenProvider creates a tokenProvider from azcore.TokenCredential. +func NewTokenProvider(tokenCredential azcore.TokenCredential) *TokenProvider { + return &TokenProvider{tokenCred: tokenCredential} +} + +// NewTokenProviderWithConnectionString creates a tokenProvider from a connection string. +func NewTokenProviderWithConnectionString(props exported.ConnectionStringProperties) (*TokenProvider, error) { + // NOTE: this is the value we've been using since forever. AFAIK, it's arbitrary.
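+ // The practical effect: tokens minted from a connection string key carry + // se = now + 2h, and the claim renewal loop is expected to refresh them + // well before that point.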
+ const defaultTokenExpiry = 2 * time.Hour + + var authOption sas.TokenProviderOption + + if props.SharedAccessSignature == nil { + authOption = sas.TokenProviderWithKey(*props.SharedAccessKeyName, *props.SharedAccessKey, defaultTokenExpiry) + } else { + authOption = sas.TokenProviderWithSAS(*props.SharedAccessSignature) + } + + provider, err := sas.NewTokenProvider(authOption) + + if err != nil { + return nil, err + } + + return &TokenProvider{sasTokenProvider: provider, InsecureDisableTLS: props.Emulator}, nil +} + +// singleUseTokenProvider allows you to wrap an *auth.Token so it can be used +// with functions that require a TokenProvider, but only actually should get +// a single token (like cbs.NegotiateClaim) +type singleUseTokenProvider auth.Token + +// GetToken will return this token. +// This function makes us compatible with auth.TokenProvider. +func (tp *singleUseTokenProvider) GetToken(uri string) (*auth.Token, error) { + return (*auth.Token)(tp), nil +} + +// GetToken will retrieve a new token. +// This function makes us compatible with auth.TokenProvider. +func (tp *TokenProvider) GetToken(uri string) (*auth.Token, error) { + token, _, err := tp.getTokenImpl(uri) + return token, err +} + +// GetToken returns a token (that is compatible as an auth.TokenProvider) and +// the calculated time when you should renew your token. +func (tp *TokenProvider) GetTokenAsTokenProvider(uri string) (*singleUseTokenProvider, time.Time, error) { + token, renewAt, err := tp.getTokenImpl(uri) + + if err != nil { + return nil, time.Time{}, err + } + + return (*singleUseTokenProvider)(token), renewAt, nil +} + +func (tp *TokenProvider) getTokenImpl(uri string) (*auth.Token, time.Time, error) { + if tp.sasTokenProvider != nil { + return tp.getSASToken(uri) + } else { + return tp.getAZCoreToken() + } +} + +func (tpa *TokenProvider) getAZCoreToken() (*auth.Token, time.Time, error) { + // not sure if URI plays in here. + accessToken, err := tpa.tokenCred.GetToken(context.TODO(), policy.TokenRequestOptions{ + Scopes: []string{ + "https://eventhubs.azure.net//.default", + }, + }) + + if err != nil { + return nil, time.Time{}, err + } + + authToken := &auth.Token{ + TokenType: auth.CBSTokenTypeJWT, + Token: accessToken.Token, + Expiry: strconv.FormatInt(accessToken.ExpiresOn.Unix(), 10), + } + + return authToken, + accessToken.ExpiresOn, + nil +} + +func (tpa *TokenProvider) getSASToken(uri string) (*auth.Token, time.Time, error) { + authToken, err := tpa.sasTokenProvider.GetToken(uri) + + if err != nil { + return nil, time.Time{}, err + } + + // we can ignore the error here since we did the string-izing of the time + // in the first place. + var expiryTime time.Time + + if authToken.Expiry != "0" { + // TODO: I'd like to just use the actual Expiry time we generated + // Filed here https://github.com/Azure/azure-sdk-for-go/issues/20468 + expiryTime = time.Now().Add(time.Minute * 15) + } + + return authToken, + expiryTime, + nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils/retrier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils/retrier.go new file mode 100644 index 00000000000..a61eb134934 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils/retrier.go @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package utils + +import ( + "context" + "errors" + "math" + "math/rand" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" +) + +type RetryFnArgs struct { + // I is the iteration of the retry "loop" and starts at 0. + // The 0th iteration is the first call, and doesn't count as a retry. + // The last try will equal RetryOptions.MaxRetries + I int32 + // LastErr is the returned error from the previous loop. + // If you have potentially expensive + LastErr error + + resetAttempts bool +} + +// ResetAttempts resets all Retry() attempts, starting back +// at iteration 0. +func (rf *RetryFnArgs) ResetAttempts() { + rf.resetAttempts = true +} + +// Retry runs a standard retry loop. It executes your passed in fn as the body of the loop. +// It returns if it exceeds the number of configured retry options or if 'isFatal' returns true. +func Retry(ctx context.Context, eventName log.Event, prefix func() string, o exported.RetryOptions, fn func(ctx context.Context, callbackArgs *RetryFnArgs) error, isFatalFn func(err error) bool) error { + if isFatalFn == nil { + panic("isFatalFn is nil, errors would panic") + } + + var ro exported.RetryOptions = o + setDefaults(&ro) + + var err error + + for i := int32(0); i <= ro.MaxRetries; i++ { + if i > 0 { + sleep := calcDelay(ro, i) + log.Writef(eventName, "(%s) Retry attempt %d sleeping for %s", prefix(), i, sleep) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(sleep): + } + } + + args := RetryFnArgs{ + I: i, + LastErr: err, + } + err = fn(ctx, &args) + + if args.resetAttempts { + log.Writef(eventName, "(%s) Resetting retry attempts", prefix()) + + // it looks weird, but we're doing -1 here because the post-increment + // will set it back to 0, which is what we want - go back to the 0th + // iteration so we don't sleep before the attempt. + // + // You'll use this when you want to get another "fast" retry attempt. 
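+ // + // Example with MaxRetries = 3 and one ResetAttempts call on the first try: + // i=0 fails (detach) -> reset -> i=0 again (no sleep) -> i=1, i=2, i=3 + // with backoff.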
+ i = int32(-1) + } + + if err != nil { + if isFatalFn(err) { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + log.Writef(eventName, "(%s) Retry attempt %d was cancelled, stopping: %s", prefix(), i, err.Error()) + } else { + log.Writef(eventName, "(%s) Retry attempt %d returned non-retryable error: %s", prefix(), i, err.Error()) + } + return err + } else { + log.Writef(eventName, "(%s) Retry attempt %d returned retryable error: %s", prefix(), i, err.Error()) + } + + continue + } + + return nil + } + + return err +} + +func setDefaults(o *exported.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = 3 + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 120 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 4 * time.Second + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } +} + +// (adapted from azcore/policy_retry) +func calcDelay(o exported.RetryOptions, try int32) time.Duration { + if try == 0 { + return 0 + } + + pow := func(number int64, exponent int32) int64 { // pow is a nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/log.go new file mode 100644 index 00000000000..9e487007163 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/log.go @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azeventhubs + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" +) + +const ( + // EventConn is used whenever we create a connection or any links (ie: producers, consumers). + EventConn log.Event = exported.EventConn + + // EventAuth is used when we're doing authentication/claims negotiation. + EventAuth log.Event = exported.EventAuth + + // EventProducer represents operations that happen on Producers. + EventProducer log.Event = exported.EventProducer + + // EventConsumer represents operations that happen on Consumers. + EventConsumer log.Event = exported.EventConsumer +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/mgmt.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/mgmt.go new file mode 100644 index 00000000000..3ac3dba46bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/mgmt.go @@ -0,0 +1,253 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+
+package azeventhubs
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh"
+	"github.com/Azure/go-amqp"
+)
+
+// EventHubProperties represents properties of the Event Hub, like the number of partitions.
+type EventHubProperties struct {
+	CreatedOn    time.Time
+	Name         string
+	PartitionIDs []string
+}
+
+// GetEventHubPropertiesOptions contains optional parameters for the GetEventHubProperties function
+type GetEventHubPropertiesOptions struct {
+	// For future expansion
+}
+
+// getEventHubProperties gets event hub properties, like the available partition IDs and when the Event Hub was created.
+func getEventHubProperties[LinkT internal.AMQPLink](ctx context.Context, eventName log.Event, ns internal.NamespaceForManagementOps, links *internal.Links[LinkT], eventHub string, retryOptions RetryOptions, options *GetEventHubPropertiesOptions) (EventHubProperties, error) {
+	var props EventHubProperties
+
+	err := links.RetryManagement(ctx, eventName, "getEventHubProperties", retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.RPCLink]) error {
+		tmpProps, err := getEventHubPropertiesInternal(ctx, ns, lwid.Link(), eventHub, options)
+
+		if err != nil {
+			return err
+		}
+
+		props = tmpProps
+		return nil
+	})
+
+	return props, err
+}
+
+func getEventHubPropertiesInternal(ctx context.Context, ns internal.NamespaceForManagementOps, rpcLink amqpwrap.RPCLink, eventHub string, options *GetEventHubPropertiesOptions) (EventHubProperties, error) {
+	token, err := ns.GetTokenForEntity(eventHub)
+
+	if err != nil {
+		return EventHubProperties{}, internal.TransformError(err)
+	}
+
+	amqpMsg := &amqp.Message{
+		ApplicationProperties: map[string]any{
+			"operation":      "READ",
+			"name":           eventHub,
+			"type":           "com.microsoft:eventhub",
+			"security_token": token.Token,
+		},
+	}
+
+	// use the caller's ctx so cancellation and deadlines propagate to the RPC
+	resp, err := rpcLink.RPC(ctx, amqpMsg)
+
+	if err != nil {
+		return EventHubProperties{}, err
+	}
+
+	if resp.Code >= 300 {
+		return EventHubProperties{}, fmt.Errorf("failed getting event hub properties: %v", resp.Description)
+	}
+
+	return newEventHubProperties(resp.Message.Value)
+}
+
+// PartitionProperties are the properties for a single partition.
+type PartitionProperties struct {
+	// BeginningSequenceNumber is the first sequence number for a partition.
+	BeginningSequenceNumber int64
+
+	// EventHubName is the name of the Event Hub for this partition.
+	EventHubName string
+
+	// IsEmpty is true if the partition is empty, false otherwise.
+	IsEmpty bool
+
+	// LastEnqueuedOffset is the offset of the latest enqueued event.
+	LastEnqueuedOffset int64
+
+	// LastEnqueuedOn is the date of the latest enqueued event.
+	LastEnqueuedOn time.Time
+
+	// LastEnqueuedSequenceNumber is the sequence number of the latest enqueued event.
+	LastEnqueuedSequenceNumber int64
+
+	// PartitionID is the partition ID of this partition.
+	PartitionID string
+}
+
+// GetPartitionPropertiesOptions are the options for the GetPartitionProperties function.
+type GetPartitionPropertiesOptions struct {
+	// For future expansion
+}
+
+// getPartitionProperties gets properties for a specific partition. This includes data like the last enqueued sequence number, the first sequence
+// number and when an event was last enqueued to the partition.
+func getPartitionProperties[LinkT internal.AMQPLink](ctx context.Context, eventName log.Event, ns internal.NamespaceForManagementOps, links *internal.Links[LinkT], eventHub string, partitionID string, retryOptions RetryOptions, options *GetPartitionPropertiesOptions) (PartitionProperties, error) {
+	var props PartitionProperties
+
+	err := links.RetryManagement(ctx, eventName, "getPartitionProperties", retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.RPCLink]) error {
+		tmpProps, err := getPartitionPropertiesInternal(ctx, ns, lwid.Link(), eventHub, partitionID, options)
+
+		if err != nil {
+			return err
+		}
+
+		props = tmpProps
+		return nil
+	})
+
+	return props, err
+}
+
+func getPartitionPropertiesInternal(ctx context.Context, ns internal.NamespaceForManagementOps, rpcLink amqpwrap.RPCLink, eventHub string, partitionID string, options *GetPartitionPropertiesOptions) (PartitionProperties, error) {
+	token, err := ns.GetTokenForEntity(eventHub)
+
+	if err != nil {
+		return PartitionProperties{}, err
+	}
+
+	amqpMsg := &amqp.Message{
+		ApplicationProperties: map[string]any{
+			"operation":      "READ",
+			"name":           eventHub,
+			"type":           "com.microsoft:partition",
+			"partition":      partitionID,
+			"security_token": token.Token,
+		},
+	}
+
+	// use the caller's ctx so cancellation and deadlines propagate to the RPC
+	resp, err := rpcLink.RPC(ctx, amqpMsg)
+
+	if err != nil {
+		return PartitionProperties{}, internal.TransformError(err)
+	}
+
+	if resp.Code >= 300 {
+		return PartitionProperties{}, fmt.Errorf("failed getting partition properties: %v", resp.Description)
+	}
+
+	return newPartitionProperties(resp.Message.Value)
+}
+
+func newEventHubProperties(amqpValue any) (EventHubProperties, error) {
+	m, ok := amqpValue.(map[string]any)
+
+	if !ok {
+		return EventHubProperties{}, errors.New("invalid message format")
+	}
+
+	partitionIDs, ok := m["partition_ids"].([]string)
+
+	if !ok {
+		return EventHubProperties{}, errors.New("invalid message format")
+	}
+
+	name, ok := m["name"].(string)
+
+	if !ok {
+		return EventHubProperties{}, errors.New("invalid message format")
+	}
+
+	createdOn, ok := m["created_at"].(time.Time)
+
+	if !ok {
+		return EventHubProperties{}, errors.New("invalid message format")
+	}
+
+	return EventHubProperties{
+		Name:         name,
+		CreatedOn:    createdOn,
+		PartitionIDs: partitionIDs,
+	}, nil
+}
+
+func newPartitionProperties(amqpValue any) (PartitionProperties, error) {
+	m, ok := amqpValue.(map[string]any)
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	eventHubName, ok := m["name"].(string)
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	partition, ok := m["partition"].(string)
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	beginningSequenceNumber, ok := eh.ConvertToInt64(m["begin_sequence_number"])
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	lastEnqueuedSequenceNumber, ok := eh.ConvertToInt64(m["last_enqueued_sequence_number"])
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	lastEnqueuedOffsetStr, ok := m["last_enqueued_offset"].(string)
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
+	}
+
+	lastEnqueuedOffset, err := strconv.ParseInt(lastEnqueuedOffsetStr, 10, 64)
+
+	if err != nil {
+		return PartitionProperties{}, fmt.Errorf("invalid message format: %w", err)
+	}
+
+	lastEnqueuedTime, ok := m["last_enqueued_time_utc"].(time.Time)
+
+	if !ok {
+		return PartitionProperties{}, errors.New("invalid message format")
format") + } + + isEmpty, ok := m["is_partition_empty"].(bool) + + if !ok { + return PartitionProperties{}, errors.New("invalid message format") + } + + return PartitionProperties{ + BeginningSequenceNumber: beginningSequenceNumber, + LastEnqueuedSequenceNumber: lastEnqueuedSequenceNumber, + LastEnqueuedOffset: lastEnqueuedOffset, + LastEnqueuedOn: lastEnqueuedTime, + IsEmpty: isEmpty, + PartitionID: partition, + EventHubName: eventHubName, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/migrationguide.md new file mode 100644 index 00000000000..4388773abdd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/migrationguide.md @@ -0,0 +1,106 @@ +# Guide to migrate from `azure-event-hubs-go` to `azeventhubs` + +This guide is intended to assist in the migration from the `azure-event-hubs-go` package to the latest beta releases (and eventual GA) of the `github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs`. + +Our goal with this newest package was to export components that can be easily integrated into multiple styles of application, while still mapping close to the underlying resources for AMQP. This includes making TCP connection sharing simple (a must when multiplexing across multiple partitions), making batching boundaries more explicit and also integrating with the `azidentity` package, opening up a large number of authentication methods. + +These changes are described in more detail, below. + +### TCP connection sharing + +In AMQP there are is a concept of a connection and links. AMQP Connections are TCP connections. Links are a logical conduit within an AMQP connection and there are typically many of them but they use the same connection and do not require their own socket. + +The prior version of this package did not allow you to share an AMQP connection when sending events, which meant sending to multiple partitions would require a TCP connection per partition. If your application used more than a few partitions this could use up a scarce resource. + +In the newer version of the library each top-level client (ProducerClient or ConsumerClient) owns their own TCP connection. For instance, in ProducerClient, sending to separate partitions creates multiple links internally, but not multiple TCP connections. ConsumerClient works similarly - it has a single TCP connection and calling ConsumerClient.NewPartitionClient creates new links, but not new TCP connections. + +If you want to split activity across multiple TCP connections you can still do so by creating multiple instances of ProducerClient or ConsumerClient. + +Some examples: + +```go +// consumerClient will own a TCP connection. +consumerClient, err := azeventhubs.NewConsumerClient(/* arguments elided for example */) + +// Close the TCP connection (and any child links) +defer consumerClient.Close(context.TODO()) + +// this call will lazily create a set of AMQP links using the consumerClient's TCP connection. +partClient0, err := consumerClient.NewPartitionClient("0", nil) +defer partClient0.Close(context.TODO()) // will close the AMQP link, not the connection + +// this call will also lazily create a set of AMQP links using the consumerClient's TCP connection. 
+partClient1, err := consumerClient.NewPartitionClient("1", nil)
+defer partClient1.Close(context.TODO()) // will close the AMQP link, not the connection
+```
+
+```go
+// will lazily create an AMQP connection
+producerClient, err := azeventhubs.NewProducerClient(/* arguments elided for example */)
+
+// close the TCP connection (and any child links created for sending events)
+defer producerClient.Close(context.TODO())
+
+// these calls will lazily create a set of AMQP links using the producerClient's TCP connection.
+producerClient.SendEventDataBatch(context.TODO(), eventDataBatchForPartition0, nil)
+producerClient.SendEventDataBatch(context.TODO(), eventDataBatchForPartition1, nil)
+```
+
+## Clients
+
+The `Hub` type has been replaced by two types:
+
+* Consuming events, using the `azeventhubs.ConsumerClient`: [docs](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#ConsumerClient) | [example](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_events_test.go)
+* Sending events, using the `azeventhubs.ProducerClient`: [docs](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#ProducerClient) | [example](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_producing_events_test.go)
+
+`EventProcessorHost` has been replaced by the `azeventhubs.Processor` type: [docs](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs#Processor) | [example](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go)
+
+## Authentication
+
+The older Event Hubs package provided some authentication methods, like `hub.NewHubFromEnvironment`. These have been replaced by Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme).
+
+You can also still authenticate using connection strings.
+
+* `azeventhubs.ConsumerClient`: [using azidentity](https://github.com/Azure/azure-sdk-for-go/blob/a46bd74e113d6a045541b82a0f3f6497011d8417/sdk/messaging/azeventhubs/example_consumerclient_test.go#L16) | [using a connection string](https://github.com/Azure/azure-sdk-for-go/blob/a46bd74e113d6a045541b82a0f3f6497011d8417/sdk/messaging/azeventhubs/example_consumerclient_test.go#L30)
+
+* `azeventhubs.ProducerClient`: [using azidentity](https://github.com/Azure/azure-sdk-for-go/blob/a46bd74e113d6a045541b82a0f3f6497011d8417/sdk/messaging/azeventhubs/example_producerclient_test.go#L16) | [using a connection string](https://github.com/Azure/azure-sdk-for-go/blob/a46bd74e113d6a045541b82a0f3f6497011d8417/sdk/messaging/azeventhubs/example_producerclient_test.go#L30)
+
+## EventBatchIterator
+
+Sending events has changed to be more explicit about when batches are formed and sent.
+
+The older module had a type (EventBatchIterator). This type has been removed and replaced
+with explicit batching, using `azeventhubs.EventDataBatch`. See here for an example: [link](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_producing_events_test.go).
+
+## Getting hub/partition information
+
+In the older module, functions to get the partition IDs, as well as runtime properties
+like the last enqueued sequence number, were on the `Hub` type. These are now on both
+of the client types instead (`ProducerClient`, `ConsumerClient`).
+
+```go
+// old
+hub.GetPartitionInformation(context.TODO(), "0")
+hub.GetRuntimeInformation(context.TODO())
+```
+
+```go
+// new
+
+// equivalent to: hub.GetRuntimeInformation(context.TODO())
+consumerClient.GetEventHubProperties(context.TODO(), nil)
+
+// equivalent to: hub.GetPartitionInformation(context.TODO(), "partition-id")
+consumerClient.GetPartitionProperties(context.TODO(), "partition-id", nil)
+
+//
+// or, using the ProducerClient
+//
+
+producerClient.GetEventHubProperties(context.TODO(), nil)
+producerClient.GetPartitionProperties(context.TODO(), "partition-id", nil)
+```
+
+## Migrating from a previous checkpoint store
+
+See here for an example: [link](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_checkpoint_migration_test.go)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/partition_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/partition_client.go
new file mode 100644
index 00000000000..8eb01be00d7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/partition_client.go
@@ -0,0 +1,380 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal"
+	"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap"
+
+	"github.com/Azure/go-amqp"
+)
+
+// DefaultConsumerGroup is the name of the default consumer group in the Event Hubs service.
+const DefaultConsumerGroup = "$Default"
+
+const defaultPrefetchSize = int32(300)
+
+// defaultMaxCreditSize is the maximum number of transfer frames we can handle
+// on the Receiver. This matches the current default window size that go-amqp
+// uses for sessions.
+const defaultMaxCreditSize = uint32(5000)
+
+// StartPosition indicates the position to start receiving events within a partition.
+// The default position is Latest.
+//
+// You can set this in the options for [ConsumerClient.NewPartitionClient].
+type StartPosition struct {
+	// Offset will start the consumer after the specified offset. Can be exclusive
+	// or inclusive, based on the Inclusive property.
+	// NOTE: offsets are not stable values, and might refer to different events over time
+	// as the Event Hub events reach their age limit and are discarded.
+	Offset *int64
+
+	// SequenceNumber will start the consumer after the specified sequence number. Can be exclusive
+	// or inclusive, based on the Inclusive property.
+	SequenceNumber *int64
+
+	// EnqueuedTime will start the consumer before events that were enqueued on or after EnqueuedTime.
+	// Can be exclusive or inclusive, based on the Inclusive property.
+	EnqueuedTime *time.Time
+
+	// Inclusive configures whether the events directly at Offset, SequenceNumber or EnqueuedTime will be included (true)
+	// or excluded (false).
+	Inclusive bool
+
+	// Earliest will start the consumer at the earliest event.
+	Earliest *bool
+
+	// Latest will start the consumer after the last event.
+	Latest *bool
+}
+
+// PartitionClient is used to receive events from an Event Hub partition.
+//
+// This type is instantiated from the [ConsumerClient] type, using [ConsumerClient.NewPartitionClient].
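+//
+// A minimal sketch of typical usage (names like consumerClient are illustrative,
+// not part of this file; error handling elided):
+//
+//	partClient, err := consumerClient.NewPartitionClient("0", nil)
+//	// handle err
+//	defer partClient.Close(context.TODO())
+//
+//	events, err := partClient.ReceiveEvents(context.TODO(), 100, nil)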
+type PartitionClient struct {
+	consumerGroup    string
+	eventHub         string
+	instanceID       string
+	links            internal.LinksForPartitionClient[amqpwrap.AMQPReceiverCloser]
+	offsetExpression string
+	ownerLevel       *int64
+	partitionID      string
+	prefetch         int32
+	retryOptions     RetryOptions
+}
+
+// ReceiveEventsOptions contains optional parameters for the ReceiveEvents function
+type ReceiveEventsOptions struct {
+	// For future expansion
+}
+
+// ReceiveEvents receives events until 'count' events have been received or the context has
+// expired or been cancelled.
+//
+// If your ReceiveEvents call appears to be stuck, there are some common causes:
+//
+// 1. The PartitionClientOptions.StartPosition defaults to "Latest" when the client is created. The connection
+// is lazily initialized, so it's possible the link was initialized to a position after events you've sent.
+// To make this deterministic, you can choose an explicit start point using a sequence number, offset or a
+// timestamp. See the [PartitionClientOptions.StartPosition] field for more details.
+//
+// 2. You might have sent the events to a different partition than intended. By default, batches that are
+// created using [ProducerClient.NewEventDataBatch] do not target a specific partition. When a partition
+// is not specified, the Azure Event Hubs service will choose the partition the events are sent to.
+//
+// To fix this, you can specify a PartitionID as part of your [EventDataBatchOptions.PartitionID] options or
+// open multiple [PartitionClient] instances, one for each partition. You can get the full list of partitions
+// at runtime using [ConsumerClient.GetEventHubProperties]. See "example_consuming_events_test.go" for
+// an example of this pattern.
+//
+// 3. Network issues can cause internal retries. To see log messages related to this, use the instructions in
+// the example function "Example_enableLogging".
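+//
+// Because this function blocks until 'count' events arrive or ctx is cancelled, a
+// common pattern (a sketch; partitionClient is an illustrative name) is to bound
+// the wait with a deadline. Events received before cancellation are still returned:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//	defer cancel()
+//	events, err := partitionClient.ReceiveEvents(ctx, 100, nil)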
+func (pc *PartitionClient) ReceiveEvents(ctx context.Context, count int, options *ReceiveEventsOptions) ([]*ReceivedEventData, error) { + var events []*ReceivedEventData + + prefetchDisabled := pc.prefetch < 0 + + if count <= 0 { + return nil, internal.NewErrNonRetriable("count should be greater than 0") + } + + if prefetchDisabled && count > int(defaultMaxCreditSize) { + return nil, internal.NewErrNonRetriable(fmt.Sprintf("count cannot exceed %d", defaultMaxCreditSize)) + } + + err := pc.links.Retry(ctx, EventConsumer, "ReceiveEvents", pc.partitionID, pc.retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.AMQPReceiverCloser]) error { + events = nil + + if prefetchDisabled { + remainingCredits := lwid.Link().Credits() + + if count > int(remainingCredits) { + newCredits := uint32(count) - remainingCredits + + log.Writef(EventConsumer, "(%s) Have %d outstanding credit, only issuing %d credits", lwid.String(), remainingCredits, newCredits) + + if err := lwid.Link().IssueCredit(newCredits); err != nil { + log.Writef(EventConsumer, "(%s) Error when issuing credits: %s", lwid.String(), err) + return err + } + } + } + + for { + amqpMessage, err := lwid.Link().Receive(ctx, nil) + + if internal.IsOwnershipLostError(err) { + log.Writef(EventConsumer, "(%s) Error, link ownership lost: %s", lwid.String(), err) + events = nil + return err + } + + if err != nil { + prefetched := getAllPrefetched(lwid.Link(), count-len(events)) + + for _, amqpMsg := range prefetched { + re, err := newReceivedEventData(amqpMsg) + + if err != nil { + log.Writef(EventConsumer, "(%s) Failed converting AMQP message to EventData: %s", lwid.String(), err) + return err + } + + events = append(events, re) + + if len(events) == count { + return nil + } + } + + // this lets cancel errors just return + return err + } + + receivedEvent, err := newReceivedEventData(amqpMessage) + + if err != nil { + log.Writef(EventConsumer, "(%s) Failed converting AMQP message to EventData: %s", lwid.String(), err) + return err + } + + events = append(events, receivedEvent) + + if len(events) == count { + return nil + } + } + }) + + if err != nil && len(events) == 0 { + transformedErr := internal.TransformError(err) + log.Writef(EventConsumer, "No events received, returning error %s", transformedErr.Error()) + return nil, transformedErr + } + + numEvents := len(events) + lastSequenceNumber := events[numEvents-1].SequenceNumber + + pc.offsetExpression = formatStartExpressionForSequence(">", lastSequenceNumber) + log.Writef(EventConsumer, "%d Events received, moving sequence to %d", numEvents, lastSequenceNumber) + return events, nil +} + +// Close releases resources for this client. +func (pc *PartitionClient) Close(ctx context.Context) error { + if pc.links != nil { + return pc.links.Close(ctx) + } + + return nil +} + +func (pc *PartitionClient) getEntityPath(partitionID string) string { + return fmt.Sprintf("%s/ConsumerGroups/%s/Partitions/%s", pc.eventHub, pc.consumerGroup, partitionID) +} + +func (pc *PartitionClient) newEventHubConsumerLink(ctx context.Context, session amqpwrap.AMQPSession, entityPath string, partitionID string) (internal.AMQPReceiverCloser, error) { + props := map[string]any{ + // this lets Event Hubs return error messages that identify which Receiver stole ownership (and other things) within + // error messages. 
+ // Ex: (ownershiplost): link detached, reason: *Error{Condition: amqp:link:stolen, Description: New receiver 'EventHubConsumerClientTestID-Interloper' with higher epoch of '1' is created hence current receiver 'EventHubConsumerClientTestID' with epoch '0' is getting disconnected. If you are recreating the receiver, make sure a higher epoch is used. TrackingId:8031553f0000a5060009a59b63f517a0_G4_B22, SystemTracker:riparkdev:eventhub:tests~10922|$default, Timestamp:2023-02-21T19:12:41, Info: map[]} + "com.microsoft:receiver-name": pc.instanceID, + } + + if pc.ownerLevel != nil { + props["com.microsoft:epoch"] = *pc.ownerLevel + } + + receiverOptions := &amqp.ReceiverOptions{ + SettlementMode: to.Ptr(amqp.ReceiverSettleModeFirst), + Filters: []amqp.LinkFilter{ + amqp.NewSelectorFilter(pc.offsetExpression), + }, + Properties: props, + TargetAddress: pc.instanceID, + } + + if pc.prefetch > 0 { + log.Writef(EventConsumer, "Enabling prefetch with %d credits", pc.prefetch) + receiverOptions.Credit = pc.prefetch + } else if pc.prefetch == 0 { + log.Writef(EventConsumer, "Enabling prefetch with %d credits", defaultPrefetchSize) + receiverOptions.Credit = defaultPrefetchSize + } else { + // prefetch is disabled, enable manual credits and enable + // a reasonable default max for the buffer. + log.Writef(EventConsumer, "Disabling prefetch") + receiverOptions.Credit = -1 + } + + log.Writef(EventConsumer, "Creating receiver:\n source:%s\n instanceID: %s\n owner level: %d\n offset: %s\n manual: %v\n prefetch: %d", + entityPath, + pc.instanceID, + pc.ownerLevel, + pc.offsetExpression, + receiverOptions.Credit == -1, + pc.prefetch) + + receiver, err := session.NewReceiver(ctx, entityPath, partitionID, receiverOptions) + + if err != nil { + return nil, err + } + + return receiver, nil +} + +func (pc *PartitionClient) init(ctx context.Context) error { + return pc.links.Retry(ctx, EventConsumer, "Init", pc.partitionID, pc.retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.AMQPReceiverCloser]) error { + return nil + }) +} + +type partitionClientArgs struct { + namespace internal.NamespaceForAMQPLinks + + consumerGroup string + eventHub string + instanceID string + partitionID string + retryOptions RetryOptions +} + +func newPartitionClient(args partitionClientArgs, options *PartitionClientOptions) (*PartitionClient, error) { + if options == nil { + options = &PartitionClientOptions{} + } + + offsetExpr, err := getStartExpression(options.StartPosition) + + if err != nil { + return nil, err + } + + if options.Prefetch > int32(defaultMaxCreditSize) { + // don't allow them to set the prefetch above the session window size. 
+		return nil, internal.NewErrNonRetriable(fmt.Sprintf("options.Prefetch cannot exceed %d", defaultMaxCreditSize))
+	}
+
+	client := &PartitionClient{
+		consumerGroup:    args.consumerGroup,
+		eventHub:         args.eventHub,
+		offsetExpression: offsetExpr,
+		ownerLevel:       options.OwnerLevel,
+		partitionID:      args.partitionID,
+		prefetch:         options.Prefetch,
+		retryOptions:     args.retryOptions,
+		instanceID:       args.instanceID,
+	}
+
+	client.links = internal.NewLinks(args.namespace, fmt.Sprintf("%s/$management", client.eventHub), client.getEntityPath, client.newEventHubConsumerLink)
+
+	return client, nil
+}
+
+func getAllPrefetched(receiver amqpwrap.AMQPReceiver, max int) []*amqp.Message {
+	var messages []*amqp.Message
+
+	for i := 0; i < max; i++ {
+		msg := receiver.Prefetched()
+
+		if msg == nil {
+			break
+		}
+
+		messages = append(messages, msg)
+	}
+
+	return messages
+}
+
+func getStartExpression(startPosition StartPosition) (string, error) {
+	gt := ">"
+
+	if startPosition.Inclusive {
+		gt = ">="
+	}
+
+	var errMultipleFieldsSet = errors.New("only a single start point can be set: Earliest, EnqueuedTime, Latest, Offset, or SequenceNumber")
+
+	offsetExpr := ""
+
+	if startPosition.EnqueuedTime != nil {
+		// time-based
+		// ex: amqp.annotation.x-opt-enqueued-time > '165805323000'
+		offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-enqueued-time %s '%d'", gt, startPosition.EnqueuedTime.UnixMilli())
+	}
+
+	if startPosition.Offset != nil {
+		// offset-based
+		// ex: amqp.annotation.x-opt-offset > '100'
+		if offsetExpr != "" {
+			return "", errMultipleFieldsSet
+		}
+
+		offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-offset %s '%d'", gt, *startPosition.Offset)
+	}
+
+	if startPosition.Latest != nil && *startPosition.Latest {
+		if offsetExpr != "" {
+			return "", errMultipleFieldsSet
+		}
+
+		offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-offset %s '@latest'", gt)
+	}
+
+	if startPosition.SequenceNumber != nil {
+		if offsetExpr != "" {
+			return "", errMultipleFieldsSet
+		}
+
+		offsetExpr = formatStartExpressionForSequence(gt, *startPosition.SequenceNumber)
+	}
+
+	if startPosition.Earliest != nil && *startPosition.Earliest {
+		if offsetExpr != "" {
+			return "", errMultipleFieldsSet
+		}
+
+		return "amqp.annotation.x-opt-offset > '-1'", nil
+	}
+
+	if offsetExpr != "" {
+		return offsetExpr, nil
+	}
+
+	// default to Latest: only events enqueued after the link is opened are received
+	return "amqp.annotation.x-opt-offset > '@latest'", nil
+}
+
+func formatStartExpressionForSequence(op string, sequenceNumber int64) string {
+	return fmt.Sprintf("amqp.annotation.x-opt-sequence-number %s '%d'", op, sequenceNumber)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor.go
new file mode 100644
index 00000000000..e7bc3f6039e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor.go
@@ -0,0 +1,515 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// processorOwnerLevel is the owner level we assign to every ProcessorPartitionClient
+// created by this Processor.
+var processorOwnerLevel = to.Ptr[int64](0)
+
+// ProcessorStrategy specifies the load balancing strategy used by the Processor.
+type ProcessorStrategy string
+
+const (
+	// ProcessorStrategyBalanced will attempt to claim a single partition at a time, until each active
+	// owner has an equal share of partitions.
+	// This is the default strategy.
+	ProcessorStrategyBalanced ProcessorStrategy = "balanced"
+
+	// ProcessorStrategyGreedy will attempt to claim as many partitions at a time as it can, ignoring
+	// balance.
+	ProcessorStrategyGreedy ProcessorStrategy = "greedy"
+)
+
+// ProcessorOptions are the options for the NewProcessor
+// function.
+type ProcessorOptions struct {
+	// LoadBalancingStrategy dictates how concurrent Processor instances distribute
+	// ownership of partitions between them.
+	// The default strategy is ProcessorStrategyBalanced.
+	LoadBalancingStrategy ProcessorStrategy
+
+	// UpdateInterval controls how often the Processor will attempt to claim partitions.
+	// The default value is 10 seconds.
+	UpdateInterval time.Duration
+
+	// PartitionExpirationDuration is the amount of time before a partition is considered
+	// unowned.
+	// The default value is 60 seconds.
+	PartitionExpirationDuration time.Duration
+
+	// StartPositions are the default start positions (configurable per partition, or with an overall
+	// default value) if a checkpoint is not found in the CheckpointStore.
+	// The default position is Latest.
+	StartPositions StartPositions
+
+	// Prefetch represents the size of the internal prefetch buffer for each ProcessorPartitionClient
+	// created by this Processor. When set, this client will attempt to always maintain
+	// an internal cache of events of this size, asynchronously, increasing the odds that
+	// ReceiveEvents() will use a locally stored cache of events, rather than having to
+	// wait for events to arrive from the network.
+	//
+	// Defaults to 300 events if Prefetch == 0.
+	// Disabled if Prefetch < 0.
+	Prefetch int32
+}
+
+// StartPositions are used if there is no checkpoint for a partition in
+// the checkpoint store.
+type StartPositions struct {
+	// PerPartition controls the start position for a specific partition,
+	// by partition ID. If a partition is not configured here it will default
+	// to the Default start position.
+	PerPartition map[string]StartPosition
+
+	// Default is used if the partition is not found in the PerPartition map.
+	Default StartPosition
+}
+
+type state int32
+
+const (
+	stateNone    state = 0
+	stateStopped state = 1
+	stateRunning state = 2
+)
+
+// Processor uses a [ConsumerClient] and [CheckpointStore] to provide automatic
+// load balancing between multiple Processor instances, even in separate
+// processes or on separate machines.
+//
+// See [example_consuming_with_checkpoints_test.go] for an example, and the function documentation
+// for [Run] for a more detailed description of how load balancing works.
+//
+// [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go
+type Processor struct {
+	stateMu sync.Mutex
+	state   state
+
+	ownershipUpdateInterval time.Duration
+	defaultStartPositions   StartPositions
+	checkpointStore         CheckpointStore
+	prefetch                int32
+
+	// consumerClient is actually a *azeventhubs.ConsumerClient;
+	// it's an interface here to make testing easier.
+	consumerClient consumerClientForProcessor
+
+	nextClients           chan *ProcessorPartitionClient
+	nextClientsReady      chan struct{}
+	consumerClientDetails consumerClientDetails
+
+	lb *processorLoadBalancer
+
+	// currentOwnerships is set to whatever our current ownerships are. The underlying
+	// value is a []Ownership.
+	currentOwnerships *atomic.Value
+}
+
+type consumerClientForProcessor interface {
+	GetEventHubProperties(ctx context.Context, options *GetEventHubPropertiesOptions) (EventHubProperties, error)
+	NewPartitionClient(partitionID string, options *PartitionClientOptions) (*PartitionClient, error)
+	getDetails() consumerClientDetails
+}
+
+// NewProcessor creates a Processor.
+//
+// More information can be found in the documentation for the [Processor]
+// type, or in [example_consuming_with_checkpoints_test.go] for an example.
+//
+// [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go
+func NewProcessor(consumerClient *ConsumerClient, checkpointStore CheckpointStore, options *ProcessorOptions) (*Processor, error) {
+	return newProcessorImpl(consumerClient, checkpointStore, options)
+}
+
+func newProcessorImpl(consumerClient consumerClientForProcessor, checkpointStore CheckpointStore, options *ProcessorOptions) (*Processor, error) {
+	if options == nil {
+		options = &ProcessorOptions{}
+	}
+
+	updateInterval := 10 * time.Second
+
+	if options.UpdateInterval != 0 {
+		updateInterval = options.UpdateInterval
+	}
+
+	partitionDurationExpiration := time.Minute
+
+	if options.PartitionExpirationDuration != 0 {
+		partitionDurationExpiration = options.PartitionExpirationDuration
+	}
+
+	startPosPerPartition := map[string]StartPosition{}
+
+	if options.StartPositions.PerPartition != nil {
+		for k, v := range options.StartPositions.PerPartition {
+			startPosPerPartition[k] = v
+		}
+	}
+
+	strategy := options.LoadBalancingStrategy
+
+	switch strategy {
+	case ProcessorStrategyBalanced:
+	case ProcessorStrategyGreedy:
+	case "":
+		strategy = ProcessorStrategyBalanced
+	default:
+		return nil, fmt.Errorf("invalid load balancing strategy '%s'", strategy)
+	}
+
+	currentOwnerships := &atomic.Value{}
+	currentOwnerships.Store([]Ownership{})
+
+	return &Processor{
+		ownershipUpdateInterval: updateInterval,
+		consumerClient:          consumerClient,
+		checkpointStore:         checkpointStore,
+
+		defaultStartPositions: StartPositions{
+			PerPartition: startPosPerPartition,
+			Default:      options.StartPositions.Default,
+		},
+		prefetch:              options.Prefetch,
+		consumerClientDetails: consumerClient.getDetails(),
+		nextClientsReady:      make(chan struct{}),
+		lb:                    newProcessorLoadBalancer(checkpointStore, consumerClient.getDetails(), strategy, partitionDurationExpiration),
+		currentOwnerships:     currentOwnerships,
+
+		// `nextClients` will be properly initialized when the user calls
+		// Run() since it needs to query the # of partitions on the Event Hub.
+		nextClients: make(chan *ProcessorPartitionClient),
+	}, nil
+}
+
+// NextPartitionClient will get the next owned [ProcessorPartitionClient] if one is acquired
+// or will block until a new one arrives or [Processor.Run] is cancelled. When the Processor
+// stops running this function will return nil.
+//
+// NOTE: You MUST call [ProcessorPartitionClient.Close] on the returned client to avoid
+// leaking resources.
+//
+// See [example_consuming_with_checkpoints_test.go] for an example of typical usage.
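+//
+// A dispatch loop built on this method might look like the following sketch
+// (processPartition is a hypothetical function, not part of this package):
+//
+//	for {
+//		partClient := processor.NextPartitionClient(ctx)
+//		if partClient == nil {
+//			break // the Processor stopped, or ctx was cancelled
+//		}
+//		go processPartition(partClient)
+//	}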
+// +// [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go +func (p *Processor) NextPartitionClient(ctx context.Context) *ProcessorPartitionClient { + select { + case <-ctx.Done(): + return nil + case <-p.nextClientsReady: + } + + select { + case nextClient := <-p.nextClients: + return nextClient + case <-ctx.Done(): + return nil + } +} + +func (p *Processor) checkState() error { + switch p.state { + case stateNone: + // not running so we can start. And lock out any other users. + p.state = stateRunning + return nil + case stateRunning: + return errors.New("the Processor is currently running. Concurrent calls to Run() are not allowed.") + case stateStopped: + return errors.New("the Processor has been stopped. Create a new instance to start processing again") + default: + return fmt.Errorf("unhandled state value %v", p.state) + } +} + +// Run handles the load balancing loop, blocking until the passed in context is cancelled +// or it encounters an unrecoverable error. On cancellation, it will return a nil error. +// +// This function should run for the lifetime of your application, or for as long as you want +// to continue to claim and process partitions. +// +// Once a Processor has been stopped it cannot be restarted and a new instance must +// be created. +// +// As partitions are claimed new [ProcessorPartitionClient] instances will be returned from +// [Processor.NextPartitionClient]. This can happen at any time, based on new Processor instances +// coming online, as well as other Processors exiting. +// +// [ProcessorPartitionClient] are used like a [PartitionClient] but provide an [ProcessorPartitionClient.UpdateCheckpoint] +// function that will store a checkpoint into the [CheckpointStore]. If the client were to crash, or be restarted +// it will pick up from the last checkpoint. +// +// See [example_consuming_with_checkpoints_test.go] for an example of typical usage. +// +// [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go +func (p *Processor) Run(ctx context.Context) error { + p.stateMu.Lock() + err := p.checkState() + p.stateMu.Unlock() + + if err != nil { + return err + } + + err = p.runImpl(ctx) + + // the context is the proper way to close down the Run() loop, so it's not + // an error and doesn't need to be returned. + if ctx.Err() != nil { + return nil + } + + return err +} + +func (p *Processor) runImpl(ctx context.Context) error { + consumers := &sync.Map{} + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + p.close(ctx, consumers) + }() + + // size the channel to the # of partitions. We can never exceed this size since + // we'll never reclaim a partition that we already have ownership of. + eventHubProperties, err := p.initNextClientsCh(ctx) + + if err != nil { + return err + } + + // do one dispatch immediately + if err := p.dispatch(ctx, eventHubProperties, consumers); err != nil { + return err + } + + // note randSource is not thread-safe but it's not currently used in a way that requires + // it to be. 
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + for { + select { + case <-ctx.Done(): + return nil + case <-time.After(calculateUpdateInterval(rnd, p.ownershipUpdateInterval)): + if err := p.dispatch(ctx, eventHubProperties, consumers); err != nil { + return err + } + } + } +} + +func calculateUpdateInterval(rnd *rand.Rand, updateInterval time.Duration) time.Duration { + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + // (copied from the retry code for calculating jitter) + return time.Duration(updateInterval.Seconds() * (rnd.Float64()/2 + 0.8) * float64(time.Second)) +} + +func (p *Processor) initNextClientsCh(ctx context.Context) (EventHubProperties, error) { + eventHubProperties, err := p.consumerClient.GetEventHubProperties(ctx, nil) + + if err != nil { + return EventHubProperties{}, err + } + + p.nextClients = make(chan *ProcessorPartitionClient, len(eventHubProperties.PartitionIDs)) + close(p.nextClientsReady) + + return eventHubProperties, nil +} + +// dispatch uses the checkpoint store to figure out which partitions should be processed by this +// instance and starts a PartitionClient, if there isn't one. +// NOTE: due to random number usage in the load balancer, this function is not thread safe. +func (p *Processor) dispatch(ctx context.Context, eventHubProperties EventHubProperties, consumers *sync.Map) error { + ownerships, err := p.lb.LoadBalance(ctx, eventHubProperties.PartitionIDs) + + if err != nil { + return err + } + + checkpoints, err := p.getCheckpointsMap(ctx) + + if err != nil { + return err + } + + wg := sync.WaitGroup{} + + // store off the set of ownerships we claimed this round - when the processor + // shuts down we'll clear them (if we still own them). + tmpOwnerships := make([]Ownership, len(ownerships)) + copy(tmpOwnerships, ownerships) + p.currentOwnerships.Store(tmpOwnerships) + + for _, ownership := range ownerships { + wg.Add(1) + + go func(o Ownership) { + defer wg.Done() + + err := p.addPartitionClient(ctx, o, checkpoints, consumers) + + if err != nil { + azlog.Writef(EventConsumer, "failed to create partition client for partition '%s': %s", o.PartitionID, err.Error()) + } + }(ownership) + } + + wg.Wait() + + return nil +} + +// addPartitionClient creates a ProcessorPartitionClient +func (p *Processor) addPartitionClient(ctx context.Context, ownership Ownership, checkpoints map[string]Checkpoint, consumers *sync.Map) error { + processorPartClient := &ProcessorPartitionClient{ + consumerClientDetails: p.consumerClientDetails, + checkpointStore: p.checkpointStore, + innerClient: nil, + partitionID: ownership.PartitionID, + cleanupFn: func() { + consumers.Delete(ownership.PartitionID) + }, + } + + // RP: I don't want to accidentally end up doing this logic because the user was closing it as we + // were doing our next load balance. + if _, alreadyExists := consumers.LoadOrStore(ownership.PartitionID, processorPartClient); alreadyExists { + return nil + } + + sp, err := p.getStartPosition(checkpoints, ownership) + + if err != nil { + return err + } + + partClient, err := p.consumerClient.NewPartitionClient(ownership.PartitionID, &PartitionClientOptions{ + StartPosition: sp, + OwnerLevel: processorOwnerLevel, + Prefetch: p.prefetch, + }) + + if err != nil { + consumers.Delete(ownership.PartitionID) + return err + } + + // make sure we create the link _now_ - if we're stealing we want to stake a claim _now_, rather than + // later when the user actually calls ReceiveEvents(), since the acquisition of the link is lazy. 
+ if err := partClient.init(ctx); err != nil { + consumers.Delete(ownership.PartitionID) + _ = partClient.Close(ctx) + return err + } + + processorPartClient.innerClient = partClient + + select { + case p.nextClients <- processorPartClient: + return nil + default: + processorPartClient.Close(ctx) + return fmt.Errorf("partitions channel full, consumer for partition %s could not be returned", ownership.PartitionID) + } +} + +func (p *Processor) getStartPosition(checkpoints map[string]Checkpoint, ownership Ownership) (StartPosition, error) { + startPosition := p.defaultStartPositions.Default + cp, hasCheckpoint := checkpoints[ownership.PartitionID] + + if hasCheckpoint { + if cp.Offset != nil { + startPosition = StartPosition{ + Offset: cp.Offset, + } + } else if cp.SequenceNumber != nil { + startPosition = StartPosition{ + SequenceNumber: cp.SequenceNumber, + } + } else { + return StartPosition{}, fmt.Errorf("invalid checkpoint for %s, no offset or sequence number", ownership.PartitionID) + } + } else if p.defaultStartPositions.PerPartition != nil { + defaultStartPosition, exists := p.defaultStartPositions.PerPartition[ownership.PartitionID] + + if exists { + startPosition = defaultStartPosition + } + } + + return startPosition, nil +} + +func (p *Processor) getCheckpointsMap(ctx context.Context) (map[string]Checkpoint, error) { + details := p.consumerClient.getDetails() + checkpoints, err := p.checkpointStore.ListCheckpoints(ctx, details.FullyQualifiedNamespace, details.EventHubName, details.ConsumerGroup, nil) + + if err != nil { + return nil, err + } + + m := map[string]Checkpoint{} + + for _, cp := range checkpoints { + m[cp.PartitionID] = cp + } + + return m, nil +} + +func (p *Processor) close(ctx context.Context, consumersMap *sync.Map) { + consumersMap.Range(func(key, value any) bool { + client := value.(*ProcessorPartitionClient) + + if client != nil { + client.Close(ctx) + } + + return true + }) + + currentOwnerships := p.currentOwnerships.Load().([]Ownership) + + for i := 0; i < len(currentOwnerships); i++ { + currentOwnerships[i].OwnerID = relinquishedOwnershipID + } + + _, err := p.checkpointStore.ClaimOwnership(ctx, currentOwnerships, nil) + + if err != nil { + azlog.Writef(EventConsumer, "Failed to relinquish ownerships. New processors will have to wait for ownerships to expire: %s", err.Error()) + } + + p.stateMu.Lock() + p.state = stateStopped + p.stateMu.Unlock() + + // NextPartitionClient() will quit out now that p.nextClients is closed. + close(p.nextClients) + + select { + case <-p.nextClientsReady: + // already closed + default: + close(p.nextClientsReady) + } +} + +// relinquishedOwnershipID indicates that a partition is immediately available, similar to +// how we treat an ownership that is expired as available. +const relinquishedOwnershipID = "" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_load_balancer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_load_balancer.go new file mode 100644 index 00000000000..62ec59a88e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_load_balancer.go @@ -0,0 +1,302 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+package azeventhubs
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"math/rand"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+type processorLoadBalancer struct {
+	checkpointStore             CheckpointStore
+	details                     consumerClientDetails
+	strategy                    ProcessorStrategy
+	partitionExpirationDuration time.Duration
+
+	// NOTE: when you create your own *rand.Rand it is not thread safe.
+	rnd *rand.Rand
+}
+
+func newProcessorLoadBalancer(checkpointStore CheckpointStore, details consumerClientDetails, strategy ProcessorStrategy, partitionExpiration time.Duration) *processorLoadBalancer {
+	return &processorLoadBalancer{
+		checkpointStore:             checkpointStore,
+		details:                     details,
+		strategy:                    strategy,
+		partitionExpirationDuration: partitionExpiration,
+		rnd:                         rand.New(rand.NewSource(time.Now().UnixNano())),
+	}
+}
+
+type loadBalancerInfo struct {
+	// current are the partitions that _we_ own
+	current []Ownership
+
+	// unownedOrExpired partitions either had no claim _ever_ or were once
+	// owned but the ownership claim has expired.
+	unownedOrExpired []Ownership
+
+	// aboveMax are ownerships where the specific owner has too many partitions;
+	// it contains _all_ the partitions for that particular consumer.
+	aboveMax []Ownership
+
+	// claimMorePartitions is true when we should try to claim more partitions
+	// because we're under the limit, or we're in a situation where we could claim
+	// one extra partition.
+	claimMorePartitions bool
+
+	// maxAllowed is the maximum number of partitions that other processors are allowed
+	// to own during this round. It can change based on how many partitions we own and whether
+	// an 'extra' partition is allowed (ie, partitions % owners is not 0). Look at
+	// [processorLoadBalancer.getAvailablePartitions] for more details.
+	maxAllowed int
+
+	raw []Ownership
+}
+
+// LoadBalance calls through to the configured load balancing strategy.
+// NOTE: this function is NOT thread safe!
+func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs []string) ([]Ownership, error) { + lbinfo, err := lb.getAvailablePartitions(ctx, partitionIDs) + + if err != nil { + return nil, err + } + + ownerships := lbinfo.current + + if lbinfo.claimMorePartitions { + switch lb.strategy { + case ProcessorStrategyGreedy: + log.Writef(EventConsumer, "[%s] Using greedy strategy to claim partitions", lb.details.ClientID) + ownerships = lb.greedyLoadBalancer(ctx, lbinfo) + case ProcessorStrategyBalanced: + log.Writef(EventConsumer, "[%s] Using balanced strategy to claim partitions", lb.details.ClientID) + o := lb.balancedLoadBalancer(ctx, lbinfo) + + if o != nil { + ownerships = append(lbinfo.current, *o) + } + default: + return nil, fmt.Errorf("[%s] invalid load balancing strategy '%s'", lb.details.ClientID, lb.strategy) + } + } + + actual, err := lb.checkpointStore.ClaimOwnership(ctx, ownerships, nil) + + if err != nil { + return nil, err + } + + if log.Should(EventConsumer) { + log.Writef(EventConsumer, "[%0.5s] Asked for %s, got %s", lb.details.ClientID, partitionsForOwnerships(ownerships), partitionsForOwnerships(actual)) + } + + return actual, nil +} + +func partitionsForOwnerships(all []Ownership) string { + var parts []string + + for _, o := range all { + parts = append(parts, o.PartitionID) + } + + return strings.Join(parts, ",") +} + +// getAvailablePartitions looks through the ownership list (using the checkpointstore.ListOwnership) and evaluates: +// - Whether we should claim more partitions +// - Which partitions are available - unowned/relinquished, expired or processors that own more than the maximum allowed. +// +// Load balancing happens in individual functions +func (lb *processorLoadBalancer) getAvailablePartitions(ctx context.Context, partitionIDs []string) (loadBalancerInfo, error) { + log.Writef(EventConsumer, "[%s] Listing ownership for %s/%s/%s", lb.details.ClientID, lb.details.FullyQualifiedNamespace, lb.details.EventHubName, lb.details.ConsumerGroup) + + ownerships, err := lb.checkpointStore.ListOwnership(ctx, lb.details.FullyQualifiedNamespace, lb.details.EventHubName, lb.details.ConsumerGroup, nil) + + if err != nil { + return loadBalancerInfo{}, err + } + + alreadyAdded := map[string]bool{} + groupedByOwner := map[string][]Ownership{ + lb.details.ClientID: nil, + } + + var unownedOrExpired []Ownership + + // split out partitions by whether they're currently owned + // and if they're expired/relinquished. + for _, o := range ownerships { + alreadyAdded[o.PartitionID] = true + + if time.Since(o.LastModifiedTime.UTC()) > lb.partitionExpirationDuration { + unownedOrExpired = append(unownedOrExpired, o) + continue + } + + if o.OwnerID == relinquishedOwnershipID { + unownedOrExpired = append(unownedOrExpired, o) + continue + } + + groupedByOwner[o.OwnerID] = append(groupedByOwner[o.OwnerID], o) + } + + numExpired := len(unownedOrExpired) + + // add in all the unowned partitions + for _, partID := range partitionIDs { + if alreadyAdded[partID] { + continue + } + + unownedOrExpired = append(unownedOrExpired, Ownership{ + FullyQualifiedNamespace: lb.details.FullyQualifiedNamespace, + ConsumerGroup: lb.details.ConsumerGroup, + EventHubName: lb.details.EventHubName, + PartitionID: partID, + OwnerID: lb.details.ClientID, + // note that we don't have etag info here since nobody has + // ever owned this partition. 
+		})
+	}
+
+	minRequired := len(partitionIDs) / len(groupedByOwner)
+	maxAllowed := minRequired
+	allowExtraPartition := len(partitionIDs)%len(groupedByOwner) > 0
+
+	// only allow owners to keep extra partitions if we've already met our minimum bar. Otherwise
+	// above the minimum is fair game.
+	if allowExtraPartition && len(groupedByOwner[lb.details.ClientID]) >= minRequired {
+		maxAllowed += 1
+	}
+
+	var aboveMax []Ownership
+
+	for id, ownerships := range groupedByOwner {
+		if id == lb.details.ClientID {
+			continue
+		}
+
+		if len(ownerships) > maxAllowed {
+			aboveMax = append(aboveMax, ownerships...)
+		}
+	}
+
+	claimMorePartitions := true
+	current := groupedByOwner[lb.details.ClientID]
+
+	if len(current) >= maxAllowed {
+		// - I have _exactly_ the right amount
+		// or
+		// - I have too many. We expect to have some stolen from us, but we'll maintain
+		//   ownership for now.
+		claimMorePartitions = false
+	} else if allowExtraPartition && len(current) == maxAllowed-1 {
+		// In the 'allowExtraPartition' scenario, some consumers will have an extra partition
+		// since things don't divide up evenly. We're one under the max, which means we _might_
+		// be able to claim another one.
+		//
+		// We will attempt to grab _one_ more but only if there are free partitions available
+		// or if one of the consumers has more than the max allowed.
+		claimMorePartitions = len(unownedOrExpired) > 0 || len(aboveMax) > 0
+	}
+
+	log.Writef(EventConsumer, "[%s] claimMorePartitions: %t, owners: %d, current: %d, unowned: %d, expired: %d, above: %d",
+		lb.details.ClientID,
+		claimMorePartitions,
+		len(groupedByOwner),
+		len(current),
+		len(unownedOrExpired)-numExpired,
+		numExpired,
+		len(aboveMax))
+
+	return loadBalancerInfo{
+		current:             current,
+		unownedOrExpired:    unownedOrExpired,
+		aboveMax:            aboveMax,
+		claimMorePartitions: claimMorePartitions,
+		raw:                 ownerships,
+		maxAllowed:          maxAllowed,
+	}, nil
+}
+
+// greedyLoadBalancer will attempt to grab as many free partitions as it needs to balance
+// in each round.
+func (lb *processorLoadBalancer) greedyLoadBalancer(ctx context.Context, lbinfo loadBalancerInfo) []Ownership {
+	ours := lbinfo.current
+
+	// try claiming from the completely unowned or expired ownerships _first_
+	randomOwnerships := getRandomOwnerships(lb.rnd, lbinfo.unownedOrExpired, lbinfo.maxAllowed-len(ours))
+	ours = append(ours, randomOwnerships...)
+
+	if len(ours) < lbinfo.maxAllowed {
+		log.Writef(EventConsumer, "Not enough expired or unowned partitions, will need to steal from other processors")
+
+		// if that's not enough then we'll randomly steal from any owners that had partitions
+		// above the maximum.
+		randomOwnerships := getRandomOwnerships(lb.rnd, lbinfo.aboveMax, lbinfo.maxAllowed-len(ours))
+		ours = append(ours, randomOwnerships...)
+	}
+
+	for i := 0; i < len(ours); i++ {
+		ours[i] = lb.resetOwnership(ours[i])
+	}
+
+	return ours
+}
+
+// balancedLoadBalancer attempts to split the partition load out between the available
+// consumers so each one has an even amount (or even + 1, if the # of consumers and #
+// of partitions doesn't divide evenly).
+//
+// NOTE: the checkpoint store itself does not have a concept of 'presence' that doesn't
+// ALSO involve owning a partition. It's possible for a consumer to get boxed out for a
+// bit until it manages to steal at least one partition since the other consumers don't
+// know it exists until then.
+func (lb *processorLoadBalancer) balancedLoadBalancer(ctx context.Context, lbinfo loadBalancerInfo) *Ownership {
+	if len(lbinfo.unownedOrExpired) > 0 {
+		idx := lb.rnd.Intn(len(lbinfo.unownedOrExpired))
+		o := lb.resetOwnership(lbinfo.unownedOrExpired[idx])
+		return &o
+	}
+
+	if len(lbinfo.aboveMax) > 0 {
+		idx := lb.rnd.Intn(len(lbinfo.aboveMax))
+		o := lb.resetOwnership(lbinfo.aboveMax[idx])
+		return &o
+	}
+
+	return nil
+}
+
+func (lb *processorLoadBalancer) resetOwnership(o Ownership) Ownership {
+	o.OwnerID = lb.details.ClientID
+	return o
+}
+
+func getRandomOwnerships(rnd *rand.Rand, ownerships []Ownership, count int) []Ownership {
+	limit := int(math.Min(float64(count), float64(len(ownerships))))
+
+	if limit == 0 {
+		return nil
+	}
+
+	// permute the full candidate list and take the first 'limit' entries, so every
+	// ownership has an equal chance of being picked (not just the first 'limit' ones).
+	choices := rnd.Perm(len(ownerships))
+
+	var newOwnerships []Ownership
+
+	for i := 0; i < limit; i++ {
+		newOwnerships = append(newOwnerships, ownerships[choices[i]])
+	}
+
+	return newOwnerships
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_partition_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_partition_client.go
new file mode 100644
index 00000000000..cc52c533da5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/processor_partition_client.go
@@ -0,0 +1,73 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azeventhubs
+
+import "context"
+
+// ProcessorPartitionClient allows you to receive events, similar to a [PartitionClient], with a
+// checkpoint store for tracking progress.
+//
+// This type is instantiated from [Processor.NextPartitionClient], which handles load balancing
+// of partition ownership between multiple [Processor] instances.
+//
+// See [example_consuming_with_checkpoints_test.go] for an example.
+//
+// NOTE: If you do NOT want to use dynamic load balancing, and would prefer to track state and ownership
+// manually, use the [ConsumerClient] instead.
+//
+// [example_consuming_with_checkpoints_test.go]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go
+type ProcessorPartitionClient struct {
+	partitionID           string
+	innerClient           *PartitionClient
+	checkpointStore       CheckpointStore
+	cleanupFn             func()
+	consumerClientDetails consumerClientDetails
+}
+
+// ReceiveEvents receives events until 'count' events have been received or the context
+// has been cancelled.
+//
+// See [PartitionClient.ReceiveEvents] for more information, including troubleshooting.
+func (c *ProcessorPartitionClient) ReceiveEvents(ctx context.Context, count int, options *ReceiveEventsOptions) ([]*ReceivedEventData, error) {
+	return c.innerClient.ReceiveEvents(ctx, count, options)
+}
+
+// UpdateCheckpoint updates the checkpoint in the CheckpointStore. New Processors will resume after
+// this checkpoint for this partition.
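+//
+// A common cadence (a sketch; partClient is an illustrative name, error handling
+// elided) is to checkpoint after each successfully processed batch, using the
+// last event in the batch:
+//
+//	events, _ := partClient.ReceiveEvents(ctx, 100, nil)
+//	if len(events) > 0 {
+//		_ = partClient.UpdateCheckpoint(ctx, events[len(events)-1], nil)
+//	}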
+func (p *ProcessorPartitionClient) UpdateCheckpoint(ctx context.Context, latestEvent *ReceivedEventData, options *UpdateCheckpointOptions) error { + seq := latestEvent.SequenceNumber + offset := latestEvent.Offset + + return p.checkpointStore.SetCheckpoint(ctx, Checkpoint{ + ConsumerGroup: p.consumerClientDetails.ConsumerGroup, + EventHubName: p.consumerClientDetails.EventHubName, + FullyQualifiedNamespace: p.consumerClientDetails.FullyQualifiedNamespace, + PartitionID: p.partitionID, + SequenceNumber: &seq, + Offset: &offset, + }, nil) +} + +// PartitionID is the partition ID of the partition we're receiving from. +// This will not change during the lifetime of this ProcessorPartitionClient. +func (p *ProcessorPartitionClient) PartitionID() string { + return p.partitionID +} + +// Close releases resources for the partition client. +// This does not close the ConsumerClient that the Processor was started with. +func (c *ProcessorPartitionClient) Close(ctx context.Context) error { + c.cleanupFn() + + if c.innerClient != nil { + return c.innerClient.Close(ctx) + } + + return nil +} + +// UpdateCheckpointOptions contains optional parameters for the [ProcessorPartitionClient.UpdateCheckpoint] function. +type UpdateCheckpointOptions struct { + // For future expansion +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/producer_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/producer_client.go new file mode 100644 index 00000000000..56e5c9d953a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/producer_client.go @@ -0,0 +1,312 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azeventhubs + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" + "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" + "github.com/Azure/go-amqp" +) + +// WebSocketConnParams are passed to your web socket creation function (ClientOptions.NewWebSocketConn) +type WebSocketConnParams = exported.WebSocketConnParams + +// RetryOptions represent the options for retries. +type RetryOptions = exported.RetryOptions + +// ProducerClientOptions contains options for the `NewProducerClient` and `NewProducerClientFromConnectionString` +// functions. +type ProducerClientOptions struct { + // Application ID that will be passed to the namespace. + ApplicationID string + + // NewWebSocketConn is a function that can create a net.Conn for use with websockets. + // For an example, see ExampleNewClient_usingWebsockets() function in example_client_test.go. + NewWebSocketConn func(ctx context.Context, params WebSocketConnParams) (net.Conn, error) + + // RetryOptions controls how often operations are retried from this client and any + // Receivers and Senders created from this client. + RetryOptions RetryOptions + + // TLSConfig configures a client with a custom *tls.Config. + TLSConfig *tls.Config +} + +// ProducerClient can be used to send events to an Event Hub. 
+type ProducerClient struct { + eventHub string + links *internal.Links[amqpwrap.AMQPSenderCloser] + namespace internal.NamespaceForProducerOrConsumer + retryOptions RetryOptions +} + +// anyPartitionID is what we target if we want to send a message and let Event Hubs pick a partition +// or if we're doing an operation that isn't partition specific, such as querying the management link +// to get event hub properties or partition properties. +const anyPartitionID = "" + +// NewProducerClient creates a ProducerClient which uses an azcore.TokenCredential for authentication. You +// MUST call [ProducerClient.Close] on this client to avoid leaking resources. +// +// The fullyQualifiedNamespace is the Event Hubs namespace name (ex: myeventhub.servicebus.windows.net) +// The credential is one of the credentials in the [azidentity] package. +// +// [azidentity]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity +func NewProducerClient(fullyQualifiedNamespace string, eventHub string, credential azcore.TokenCredential, options *ProducerClientOptions) (*ProducerClient, error) { + return newProducerClientImpl(producerClientCreds{ + fullyQualifiedNamespace: fullyQualifiedNamespace, + credential: credential, + eventHub: eventHub, + }, options) +} + +// NewProducerClientFromConnectionString creates a ProducerClient from a connection string. You +// MUST call [ProducerClient.Close] on this client to avoid leaking resources. +// +// connectionString can be one of two formats - with or without an EntityPath key. +// +// When the connection string does not have an entity path, as shown below, the eventHub parameter cannot +// be empty and should contain the name of your event hub. +// +// Endpoint=sb://.servicebus.windows.net/;SharedAccessKeyName=;SharedAccessKey= +// +// When the connection string DOES have an entity path, as shown below, the eventHub parameter must be empty. +// +// Endpoint=sb://.servicebus.windows.net/;SharedAccessKeyName=;SharedAccessKey=;EntityPath=; +func NewProducerClientFromConnectionString(connectionString string, eventHub string, options *ProducerClientOptions) (*ProducerClient, error) { + props, err := parseConn(connectionString, eventHub) + + if err != nil { + return nil, err + } + + return newProducerClientImpl(producerClientCreds{ + connectionString: connectionString, + eventHub: *props.EntityPath, + }, options) +} + +// EventDataBatchOptions contains optional parameters for the [ProducerClient.NewEventDataBatch] function. +// +// If both PartitionKey and PartitionID are nil, Event Hubs will choose an arbitrary partition +// for any events in this [EventDataBatch]. +type EventDataBatchOptions struct { + // MaxBytes overrides the max size (in bytes) for a batch. + // By default NewEventDataBatch will use the max message size provided by the service. + MaxBytes uint64 + + // PartitionKey is hashed to calculate the partition assignment. Messages and message + // batches with the same PartitionKey are guaranteed to end up in the same partition. + // Note that if you use this option then PartitionID cannot be set. + PartitionKey *string + + // PartitionID is the ID of the partition to send these messages to. + // Note that if you use this option then PartitionKey cannot be set. + PartitionID *string +} + +// NewEventDataBatch can be used to create an EventDataBatch, which can contain multiple +// events. 
+//
+// EventDataBatch contains logic to make sure that it doesn't exceed the maximum size
+// for the Event Hubs link, using its [azeventhubs.EventDataBatch.AddEventData] function.
+// A lower size limit can also be configured through the options.
+//
+// NOTE: if options is nil or empty, Event Hubs will choose an arbitrary partition for any
+// events in this [EventDataBatch].
+//
+// If the operation fails it can return an azeventhubs.Error type if the failure is actionable.
+func (pc *ProducerClient) NewEventDataBatch(ctx context.Context, options *EventDataBatchOptions) (*EventDataBatch, error) {
+	var batch *EventDataBatch
+
+	partitionID := anyPartitionID
+
+	if options != nil && options.PartitionID != nil {
+		partitionID = *options.PartitionID
+	}
+
+	err := pc.links.Retry(ctx, exported.EventProducer, "NewEventDataBatch", partitionID, pc.retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.AMQPSenderCloser]) error {
+		tmpBatch, err := newEventDataBatch(lwid.Link(), options)
+
+		if err != nil {
+			return err
+		}
+
+		batch = tmpBatch
+		return nil
+	})
+
+	if err != nil {
+		return nil, internal.TransformError(err)
+	}
+
+	return batch, nil
+}
+
+// SendEventDataBatchOptions contains optional parameters for the SendEventDataBatch function
+type SendEventDataBatchOptions struct {
+	// For future expansion
+}
+
+// SendEventDataBatch sends an event data batch to Event Hubs.
+func (pc *ProducerClient) SendEventDataBatch(ctx context.Context, batch *EventDataBatch, options *SendEventDataBatchOptions) error {
+	amqpMessage, err := batch.toAMQPMessage()
+
+	if err != nil {
+		return err
+	}
+
+	partID := getPartitionID(batch.partitionID)
+
+	err = pc.links.Retry(ctx, exported.EventProducer, "SendEventDataBatch", partID, pc.retryOptions, func(ctx context.Context, lwid internal.LinkWithID[amqpwrap.AMQPSenderCloser]) error {
+		azlog.Writef(EventProducer, "[%s] Sending message with ID %v to partition %q", lwid.String(), amqpMessage.Properties.MessageID, partID)
+		return lwid.Link().Send(ctx, amqpMessage, nil)
+	})
+	return internal.TransformError(err)
+}
+
+// GetPartitionProperties gets properties for a specific partition. This includes data like the last enqueued sequence number, the first sequence
+// number and when an event was last enqueued to the partition.
+func (pc *ProducerClient) GetPartitionProperties(ctx context.Context, partitionID string, options *GetPartitionPropertiesOptions) (PartitionProperties, error) {
+	return getPartitionProperties(ctx, EventProducer, pc.namespace, pc.links, pc.eventHub, partitionID, pc.retryOptions, options)
+}
+
+// GetEventHubProperties gets event hub properties, like the available partition IDs and when the Event Hub was created.
+func (pc *ProducerClient) GetEventHubProperties(ctx context.Context, options *GetEventHubPropertiesOptions) (EventHubProperties, error) {
+	return getEventHubProperties(ctx, EventProducer, pc.namespace, pc.links, pc.eventHub, pc.retryOptions, options)
+}
+
+// Close releases resources for this client.
+func (pc *ProducerClient) Close(ctx context.Context) error {
+	if err := pc.links.Close(ctx); err != nil {
+		azlog.Writef(EventProducer, "Failed when closing links while shutting down producer client: %s", err.Error())
+	}
+	return pc.namespace.Close(ctx, true)
+}
+
+func (pc *ProducerClient) getEntityPath(partitionID string) string {
+	if partitionID != anyPartitionID {
+		return fmt.Sprintf("%s/Partitions/%s", pc.eventHub, partitionID)
+	} else {
+		// this is the "let Event Hubs decide" link - any sends that occur here will
+		// end up getting distributed to different partitions on the service side, rather
+		// than being specified in the client.
+		return pc.eventHub
+	}
+}
+
+func (pc *ProducerClient) newEventHubProducerLink(ctx context.Context, session amqpwrap.AMQPSession, entityPath string, partitionID string) (amqpwrap.AMQPSenderCloser, error) {
+	sender, err := session.NewSender(ctx, entityPath, partitionID, &amqp.SenderOptions{
+		SettlementMode:              to.Ptr(amqp.SenderSettleModeMixed),
+		RequestedReceiverSettleMode: to.Ptr(amqp.ReceiverSettleModeFirst),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+type producerClientCreds struct {
+	connectionString string
+
+	// the Event Hubs namespace name (ex: myservicebus.servicebus.windows.net)
+	fullyQualifiedNamespace string
+	credential              azcore.TokenCredential
+
+	eventHub string
+}
+
+func newProducerClientImpl(creds producerClientCreds, options *ProducerClientOptions) (*ProducerClient, error) {
+	client := &ProducerClient{
+		eventHub: creds.eventHub,
+	}
+
+	var nsOptions []internal.NamespaceOption
+
+	if creds.connectionString != "" {
+		nsOptions = append(nsOptions, internal.NamespaceWithConnectionString(creds.connectionString))
+	} else if creds.credential != nil {
+		option := internal.NamespaceWithTokenCredential(
+			creds.fullyQualifiedNamespace,
+			creds.credential)
+
+		nsOptions = append(nsOptions, option)
+	}
+
+	if options != nil {
+		client.retryOptions = options.RetryOptions
+
+		if options.TLSConfig != nil {
+			nsOptions = append(nsOptions, internal.NamespaceWithTLSConfig(options.TLSConfig))
+		}
+
+		if options.NewWebSocketConn != nil {
+			nsOptions = append(nsOptions, internal.NamespaceWithWebSocket(options.NewWebSocketConn))
+		}
+
+		if options.ApplicationID != "" {
+			nsOptions = append(nsOptions, internal.NamespaceWithUserAgent(options.ApplicationID))
+		}
+
+		nsOptions = append(nsOptions, internal.NamespaceWithRetryOptions(options.RetryOptions))
+	}
+
+	tmpNS, err := internal.NewNamespace(nsOptions...)
+
+	if err != nil {
+		return nil, err
+	}
+
+	client.namespace = tmpNS
+
+	client.links = internal.NewLinks(tmpNS, fmt.Sprintf("%s/$management", client.eventHub), client.getEntityPath, client.newEventHubProducerLink)
+
+	return client, err
+}
+
+// parseConn parses the connection string and ensures that the returned [exported.ConnectionStringProperties]
+// has an EntityPath set, either from the connection string or using the eventHub parameter.
+//
+// If the connection string has an EntityPath then eventHub must be empty.
+// If the connection string does not have an entity path then the eventHub must contain a value.
+func parseConn(connectionString string, eventHub string) (exported.ConnectionStringProperties, error) {
+	props, err := exported.ParseConnectionString(connectionString)
+
+	if err != nil {
+		return exported.ConnectionStringProperties{}, err
+	}
+
+	if props.EntityPath == nil {
+		if eventHub == "" {
+			return exported.ConnectionStringProperties{}, errors.New("connection string does not contain an EntityPath. eventHub cannot be an empty string")
+		}
+		props.EntityPath = &eventHub
+	} else {
+		if eventHub != "" {
+			return exported.ConnectionStringProperties{}, errors.New("connection string contains an EntityPath. eventHub must be an empty string")
+		}
+	}
+
+	return props, nil
+}
+
+func getPartitionID(partitionID *string) string {
+	if partitionID != nil {
+		return *partitionID
+	}
+
+	return anyPartitionID
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/sample.env b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/sample.env
new file mode 100644
index 00000000000..f8687bcf89a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/sample.env
@@ -0,0 +1,20 @@
+# These are environment variables you'll need to run tests and
+# samples in this package.
+# NOTE: Rename this file to .env before running any tests.
+
+# The connection string for your event hub:
+# Endpoint=sb://.servicebus.windows.net/;SharedAccessKeyName=;SharedAccessKey=;EntityPath=
+EVENTHUB_CONNECTION_STRING=event-hub-connection-string
+
+# Your Event Hub namespace:
+# .servicebus.windows.net
+EVENTHUB_NAMESPACE=event-hub-namespace
+
+# The name of the event hub, within your Event Hub namespace
+EVENTHUB_NAME=event-hub-name
+
+# Checkpoint store information
+
+# Azure storage account connection string
+# DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net
+CHECKPOINTSTORE_STORAGE_CONNECTION_STRING=storage-connection-string
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/test-resources.bicep
new file mode 100644
index 00000000000..1310fee901a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/test-resources.bicep
@@ -0,0 +1,225 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+@description('The base resource name.')
+param baseName string = resourceGroup().name
+
+#disable-next-line no-hardcoded-env-urls // it's flagging the help string.
+@description('Storage endpoint suffix.
The default value uses Azure Public Cloud (ie: core.windows.net)') +param storageEndpointSuffix string = environment().suffixes.storage + +@description('The resource location') +param location string = resourceGroup().location + +var apiVersion = '2017-04-01' +var storageApiVersion = '2019-04-01' +var namespaceName = baseName +var storageAccountName = 'storage${baseName}' +var containerName = 'container' +var iotName = 'iot${baseName}' +var authorizationName = '${baseName}/RootManageSharedAccessKey' + +resource namespace 'Microsoft.EventHub/namespaces@2017-04-01' = { + name: namespaceName + location: location + sku: { + name: 'Standard' + tier: 'Standard' + capacity: 5 + } + properties: { + isAutoInflateEnabled: false + maximumThroughputUnits: 0 + } +} + +resource authorization 'Microsoft.EventHub/namespaces/AuthorizationRules@2017-04-01' = { + name: authorizationName + properties: { + rights: [ + 'Listen' + 'Manage' + 'Send' + ] + } + dependsOn: [ + namespace + ] +} + +resource authorizedListenOnly 'Microsoft.EventHub/namespaces/AuthorizationRules@2017-04-01' = { + name: 'ListenOnly' + parent: namespace + properties: { + rights: [ + 'Listen' + ] + } +} + +resource authorizedSendOnly 'Microsoft.EventHub/namespaces/AuthorizationRules@2017-04-01' = { + name: 'SendOnly' + parent: namespace + properties: { + rights: [ + 'Send' + ] + } +} + +resource eventHub 'Microsoft.EventHub/namespaces/eventhubs@2017-04-01' = { + name: 'eventhub' + properties: { + messageRetentionInDays: 1 + partitionCount: 4 + } + parent: namespace +} + +resource linksonly 'Microsoft.EventHub/namespaces/eventhubs@2017-04-01' = { + name: 'linksonly' + properties: { + messageRetentionInDays: 1 + partitionCount: 1 + } + parent: namespace +} + +resource namespaceName_default 'Microsoft.EventHub/namespaces/networkRuleSets@2017-04-01' = { + name: 'default' + parent: namespace + properties: { + defaultAction: 'Deny' + virtualNetworkRules: [] + ipRules: [] + } +} + +resource eventHubNameFull_Default 'Microsoft.EventHub/namespaces/eventhubs/consumergroups@2017-04-01' = { + name: '$Default' + properties: {} + parent: eventHub +} + +resource storageAccount 'Microsoft.Storage/storageAccounts@2019-04-01' = { + name: storageAccountName + location: location + sku: { + name: 'Standard_RAGRS' + } + kind: 'StorageV2' + properties: { + networkAcls: { + bypass: 'AzureServices' + virtualNetworkRules: [] + ipRules: [] + defaultAction: 'Allow' + } + supportsHttpsTrafficOnly: true + encryption: { + services: { + file: { + enabled: true + } + blob: { + enabled: true + } + } + keySource: 'Microsoft.Storage' + } + accessTier: 'Hot' + } +} + +resource storageAccountName_default_container 'Microsoft.Storage/storageAccounts/blobServices/containers@2019-04-01' = { + name: '${storageAccountName}/default/${containerName}' + dependsOn: [ + storageAccount + ] +} + +resource iot 'Microsoft.Devices/IotHubs@2018-04-01' = { + name: iotName + location: location + sku: { + name: 'S1' + capacity: 1 + } + properties: { + ipFilterRules: [] + eventHubEndpoints: { + events: { + retentionTimeInDays: 1 + partitionCount: 4 + } + } + routing: { + endpoints: { + serviceBusQueues: [] + serviceBusTopics: [] + eventHubs: [] + storageContainers: [] + } + routes: [] + fallbackRoute: { + name: '$fallback' + source: 'DeviceMessages' + condition: 'true' + endpointNames: [ + 'events' + ] + isEnabled: true + } + } + storageEndpoints: { + '$default': { + sasTtlAsIso8601: 'PT1H' + connectionString: 
'DefaultEndpointsProtocol=https;AccountName=${storageAccountName};AccountKey=${listKeys(storageAccount.id, storageApiVersion).keys[0].value};EndpointSuffix=${storageEndpointSuffix}' + containerName: containerName + } + } + messagingEndpoints: { + fileNotifications: { + lockDurationAsIso8601: 'PT1M' + ttlAsIso8601: 'PT1H' + maxDeliveryCount: 10 + } + } + enableFileUploadNotifications: false + cloudToDevice: { + maxDeliveryCount: 10 + defaultTtlAsIso8601: 'PT1H' + feedback: { + lockDurationAsIso8601: 'PT1M' + ttlAsIso8601: 'PT1H' + maxDeliveryCount: 10 + } + } + features: 'None' + } +} +output IOTHUB_CONNECTION_STRING string = 'HostName=${reference(iot.id, providers('Microsoft.Devices', 'IoTHubs').apiVersions[0]).hostName};SharedAccessKeyName=iothubowner;SharedAccessKey=${listKeys(iot.id, providers('Microsoft.Devices', 'IoTHubs').apiVersions[0]).value[0].primaryKey}' + +// used for TokenCredential tests +output EVENTHUB_NAMESPACE string = '${namespace.name}.servicebus.windows.net' +output CHECKPOINTSTORE_STORAGE_ENDPOINT string = storageAccount.properties.primaryEndpoints.blob +output EVENTHUB_NAME string = eventHub.name +output EVENTHUB_LINKSONLY_NAME string = linksonly.name + +// connection strings +output EVENTHUB_CONNECTION_STRING string = listKeys( + resourceId('Microsoft.EventHub/namespaces/authorizationRules', namespaceName, 'RootManageSharedAccessKey'), + apiVersion +).primaryConnectionString +output EVENTHUB_CONNECTION_STRING_LISTEN_ONLY string = listKeys( + resourceId('Microsoft.EventHub/namespaces/authorizationRules', namespaceName, authorizedListenOnly.name), + apiVersion +).primaryConnectionString +output EVENTHUB_CONNECTION_STRING_SEND_ONLY string = listKeys( + resourceId('Microsoft.EventHub/namespaces/authorizationRules', namespaceName, authorizedSendOnly.name), + apiVersion +).primaryConnectionString +output CHECKPOINTSTORE_STORAGE_CONNECTION_STRING string = 'DefaultEndpointsProtocol=https;AccountName=${storageAccountName};AccountKey=${listKeys(storageAccount.id, storageApiVersion).keys[0].value};EndpointSuffix=${storageEndpointSuffix}' + +output RESOURCE_GROUP string = resourceGroup().name +output AZURE_SUBSCRIPTION_ID string = subscription().subscriptionId diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md index 796215fa484..988db23a29c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 1.7.1 (2024-05-20) + +### Bugs Fixed + +- Emulator strings should allow for hosts other than localhost (PR#22898) + ## 1.7.0 (2024-04-02) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go index ce0e2610dfc..4d11f9f1882 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go @@ -100,14 +100,13 @@ func ParseConnectionString(connStr string) (ConnectionStringProperties, error) { } if csp.Emulator { - // check that they're only connecting to localhost endpointParts := strings.SplitN(csp.Endpoint, ":", 3) // allow for a port, if it exists. 
-	if len(endpointParts) < 2 || endpointParts[0] != "sb" || endpointParts[1] != "//localhost" {
-		// there should always be at least two parts "sb:" and "//localhost"
+	if len(endpointParts) < 2 || endpointParts[0] != "sb" {
+		// there should always be at least two parts "sb:" and "//"
 		// with an optional 3rd piece that's the port "1111".
 		// (we don't need to validate it's a valid host since it's been through url.Parse() above)
-		return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb://localhost or sb://localhost:, not %s", csp.Endpoint)
+		return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb:// or sb://:, not %s", csp.Endpoint)
 	}
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go
index a2402e48ac3..f3079017326 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go
@@ -4,4 +4,4 @@
 package internal
 
 // Version is the semantic version number
-const Version = "v1.7.0"
+const Version = "v1.7.1"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/CHANGELOG.md
new file mode 100644
index 00000000000..5f27242e745
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/CHANGELOG.md
@@ -0,0 +1,64 @@
+# Release History
+
+## 1.1.0 (2023-05-09)
+
+### Other Changes
+* Updated doc comments
+
+## 1.1.0-beta.1 (2023-04-11)
+
+### Features Added
+* Added the `LogsClient.QueryResource` method which allows users to query Azure resources directly without a Log Analytics workspace
+
+### Other Changes
+* Updated dependencies and documentation
+
+## 1.0.0 (2023-02-08)
+
+### Breaking Changes
+* Removed `LogsQueryOptions.String()`
+* Fix casing on some metrics fields
+
+### Other Changes
+* Doc and example updates
+
+## 0.4.0 (2023-01-12)
+
+### Features Added
+* Added `TimeInterval` type with constructor to aid with timespan creation
+* Added `NewBatchQueryRequest` constructor to aid with logs batch requests
+* Added `LogsQueryOptions` model for easier setting of logs options
+
+### Breaking Changes
+* Changed type of `Body.Timespan`, `MetricsClientQueryResourceOptions.Timespan`, `Response.Timespan` from *string to *TimeInterval
+* Remove `ColumnIndexLookup` field from Table struct
+* Renamed `Body.Workspaces` to `Body.AdditionalWorkspaces`
+* Renamed `Results.Render` and `BatchResponse.Render` to `Results.Visualization` and `BatchResponse.Visualization`
+
+### Other Changes
+* Doc and example updates
+
+## 0.3.0 (2022-11-08)
+
+### Features Added
+* Added `ColumnIndexLookup` field to Table struct
+* Added type `Row`
+* Added sovereign cloud support
+
+### Breaking Changes
+* Added error return values to `NewLogsClient` and `NewMetricsClient`
+* Rename `Batch` to `QueryBatch`
+* Rename `NewListMetricDefinitionsPager` to `NewListDefinitionsPager`
+* Rename `NewListMetricNamespacesPager` to `NewListNamespacesPager`
+* Changed type of `Render` and `Statistics` from interface{} to []byte
+
+### Other Changes
+* Updated docs with more detailed examples
+
+## 0.2.0 (2022-10-11)
+
+### Breaking Changes
+* Changed format of logs `ErrorInfo` struct to custom error type
+
+## 0.1.0 (2022-09-08)
+* This is the initial release
of the `azquery` library
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/LICENSE.txt
similarity index 92%
rename from vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/LICENSE.txt
index 05b0ebf5bc7..ec703274aad 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/LICENSE.txt
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+ MIT License
 
-Copyright (c) Microsoft Corporation.
+Copyright (c) Microsoft Corporation. All rights reserved.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/MIGRATION.md
new file mode 100644
index 00000000000..caf6c162586
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/MIGRATION.md
@@ -0,0 +1,177 @@
+# Guide to migrate from `operationalinsights` and monitor `insights` to `azquery`
+
+This guide is intended to assist in the migration to the `azquery` module. `azquery` allows users to retrieve log and metric data from Azure Monitor.
+
+## Package consolidation
+
+ Azure Monitor allows users to retrieve telemetry data for their Azure resources. The main two data categories for Azure Monitor are [metrics](https://learn.microsoft.com/azure/azure-monitor/essentials/data-platform-metrics) and [logs](https://learn.microsoft.com/azure/azure-monitor/logs/data-platform-logs).
+
+ There have been a number of [terminology](https://learn.microsoft.com/azure/azure-monitor/terminology) changes for Azure Monitor over the years which resulted in the operations being spread over multiple packages. For Go, metrics methods were contained in `github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt//insights` and logs methods resided in `github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights`.
+
+The new `azquery` module condenses metrics and logs functionality into one package for simpler access. The `azquery` module contains two clients: [LogsClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#LogsClient) and [MetricsClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#MetricsClient).
+
+Transitioning to a single package has resulted in a number of name changes, as detailed below.
+
+### Log name changes
+
+| `operationalinsights` | `azquery` |
+| ----------- | ----------- |
+| QueryClient.Execute | LogsClient.QueryWorkspace |
+| MetadataClient.Get and MetadataClient.Post | N/A |
+
+The `azquery` module does not contain the `MetadataClient`.
For that functionality, please use the old [`operationalinsights`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights) module or [file an issue in our github repo](https://github.com/Azure/azure-sdk-for-go/issues), so we can prioritize adding it to `azquery`.
+
+### Metrics name changes
+
+| `insights` | `azquery` |
+| ----------- | ----------- |
+| MetricsClient.List | MetricsClient.QueryResource |
+| MetricDefinitionsClient.List | MetricsClient.NewListDefinitionsPager |
+| MetricNamespacesClient.List | MetricsClient.NewListNamespacesPager |
+
+## Query Logs
+
+### `operationalinsights`
+```go
+import (
+	"context"
+
+	"github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights"
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// create the client
+client := operationalinsights.NewQueryClient()
+client.Authorizer = autorest.NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
+	"x-api-key": "DEMO_KEY",
+})
+
+// execute the query
+query := ""
+timespan := "2023-12-25/2023-12-26"
+
+res, err := client.Execute(context.TODO(), "DEMO_WORKSPACE", operationalinsights.QueryBody{Query: &query, Timespan: &timespan})
+if err != nil {
+	//TODO: handle error
+}
+```
+
+### `azquery`
+
+Compared to previous versions, querying logs with the new `azquery` module is clearer and simpler. There are a number of name changes for clarity, like how the old `Execute` method is now [`QueryWorkspace`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#LogsClient.QueryWorkspace). In addition, there is improved time support. Before, if a user added a timespan over which to query the request, it had to be a string constructed in the ISO8601 interval format. Users frequently made mistakes when constructing this string. With the new `QueryWorkspace` method, the type of timespan has been changed from a string to a new type named [`TimeInterval`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#TimeInterval). `TimeInterval` has a constructor that allows users to take advantage of Go's time package, allowing easier creation.
+ +```go +import ( + "context" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery" +) + +// create the logs client +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + //TODO: handle error +} +client, err := azquery.NewLogsClient(cred, nil) +if err != nil { + //TODO: handle error +} + +// execute the logs query +res, err := client.QueryWorkspace(context.TODO(), workspaceID, + azquery.Body{ + Query: to.Ptr(""), + Timespan: to.Ptr(azquery.NewTimeInterval(time.Date(2022, 12, 25, 0, 0, 0, 0, time.UTC), time.Date(2022, 12, 25, 12, 0, 0, 0, time.UTC))), + }, + nil) +if err != nil { + //TODO: handle error +} +if res.Error != nil { + //TODO: handle partial error +} +``` + +## Query Metrics + +### `insights` + +```go +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2022-10-01-preview/insights" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +// create the client +client := insights.NewMetricsClient("") +authorizer, err := auth.NewAuthorizerFromCLI() +if err == nil { + client.Authorizer = authorizer +} + +// execute the query +timespan := "2023-12-25/2023-12-26" +interval := "PT1M" +metricnames := "" +aggregation := "Average" +top := 3 +orderby := "Average asc" +filter := "BlobType eq '*'" +resultType := insights.ResultTypeData +metricnamespace := "Microsoft.Storage/storageAccounts/blobServices" + +res, err := client.List(context.TODO(), resourceURI, timespan, &interval, metricnames, aggregation, &top, orderby, filter, resultType, metricnamespace) +if err != nil { + //TODO: handle error +} +``` + +### `azquery` + +The main difference between the old and new methods of querying metrics is in the naming. The new method has an updated convention for clarity. For example, the old name of the method was simply `List`. Now, it's `QueryResource`. There have also been a number of casing fixes and the query options have been moved into the options struct. 
+
+```go
+import (
+	"context"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery"
+)
+
+// create the metrics client
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+if err != nil {
+	//TODO: handle error
+}
+metricsClient, err := azquery.NewMetricsClient(cred, nil)
+if err != nil {
+	//TODO: handle error
+}
+
+// execute the metrics query
+res, err := metricsClient.QueryResource(context.TODO(), resourceURI,
+	&azquery.MetricsClientQueryResourceOptions{
+		Timespan:        to.Ptr(azquery.NewTimeInterval(time.Date(2022, 12, 25, 0, 0, 0, 0, time.UTC), time.Date(2022, 12, 25, 12, 0, 0, 0, time.UTC))),
+		Interval:        to.Ptr("PT1M"),
+		MetricNames:     nil,
+		Aggregation:     to.SliceOfPtrs(azquery.AggregationTypeAverage, azquery.AggregationTypeCount),
+		Top:             to.Ptr[int32](3),
+		OrderBy:         to.Ptr("Average asc"),
+		Filter:          to.Ptr("BlobType eq '*'"),
+		ResultType:      nil,
+		MetricNamespace: to.Ptr("Microsoft.Storage/storageAccounts/blobServices"),
+	})
+if err != nil {
+	//TODO: handle error
+}
+```
+
+
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/README.md
new file mode 100644
index 00000000000..b76736e3646
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/README.md
@@ -0,0 +1,151 @@
+# Azure Monitor Query client module for Go
+
+The Azure Monitor Query client module is used to execute read-only queries against [Azure Monitor][azure_monitor_overview]'s two data platforms:
+
+- [Logs][logs_overview] - Collects and organizes log and performance data from monitored resources. Data from different sources such as platform logs from Azure services, log and performance data from virtual machine agents, and usage and performance data from apps can be consolidated into a single [Azure Log Analytics workspace][log_analytics_workspace]. The various data types can be analyzed together using the [Kusto Query Language][kusto_query_language]. See the [Kusto to SQL cheat sheet][kusto_to_sql] for more information.
+- [Metrics][metrics_overview] - Collects numeric data from monitored resources into a time series database. Metrics are numerical values that are collected at regular intervals and describe some aspect of a system at a particular time. Metrics are lightweight and capable of supporting near real-time scenarios, making them particularly useful for alerting and fast detection of issues.
+
+[Source code][azquery_repo] | [Package (pkg.go.dev)][azquery_pkg_go] | [REST API documentation][monitor_rest_docs] | [Product documentation][monitor_docs] | [Samples][azquery_pkg_go_samples]
+
+## Getting started
+
+### Prerequisites
+
+* Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install)
+* Azure subscription - [Create a free account][azure_sub]
+* To query some logs, an Azure Log Analytics workspace ID - Create an [Azure Log Analytics workspace][log_analytics_workspace_create]
+* To query metrics and some logs, the Resource URI of an Azure resource (Storage Account, Key Vault, CosmosDB, etc.) that you plan to monitor
+
+### Install the packages
+
+Install the `azquery` and `azidentity` modules with `go get`:
+
+```bash
+go get github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery
+go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
+```
+
+The [azidentity][azure_identity] module is used for Azure Active Directory authentication during client construction.
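+
+For example, a minimal sketch of creating a logs client with a default credential (error handling is reduced to TODOs here, as in the package samples):
+
+```go
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery"
+)
+
+// create a credential from the environment, then build a logs client with it
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+if err != nil {
+	//TODO: handle error
+}
+logsClient, err := azquery.NewLogsClient(cred, nil)
+if err != nil {
+	//TODO: handle error
+}
+_ = logsClient
+```
+
+A metrics client is constructed the same way with `azquery.NewMetricsClient`.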
+
+### Authentication
+
+An authenticated client object is required to execute a query. The examples demonstrate using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate; however, the client accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types.
+
+The clients default to the Azure public cloud. For other cloud configurations, see the [cloud][cloud_documentation] package documentation.
+
+#### Create a logs client
+
+Example [logs client][example_logs_client]
+
+#### Create a metrics client
+
+Example [metrics client][example_metrics_client]
+
+## Key concepts
+
+### Timespan
+
+It's best practice to always query with a timespan (type `TimeInterval`) to prevent excessive queries of the entire logs or metrics data set. Log queries use the ISO8601 Time Interval Standard. All time should be represented in UTC. If the timespan is included in both the Kusto query string and `Timespan` field, the timespan is the intersection of the two values.
+
+Use the `NewTimeInterval()` method for easy creation.
+
+### Metrics data structure
+
+Each set of metric values is a time series with the following characteristics:
+
+- The time the value was collected
+- The resource associated with the value
+- A namespace that acts like a category for the metric
+- A metric name
+- The value itself
+- Some metrics may have multiple dimensions as described in [multi-dimensional metrics][multi-metrics]. Custom metrics can have up to 10 dimensions.
+
+### Logs query rate limits and throttling
+
+The Log Analytics service applies throttling when the request rate is too high. Limits, such as the maximum number of rows returned, are also applied on the Kusto queries. For more information, see [Query API][service_limits].
+
+If you're executing a batch logs query, a throttled request will return an `ErrorInfo` object. That object's `code` value will be `ThrottledError`.
+
+### Advanced logs queries
+
+#### Query multiple workspaces
+
+To run the same query against multiple Log Analytics workspaces, add the additional workspace ID strings to the `AdditionalWorkspaces` slice in the `Body` struct.
+
+When multiple workspaces are included in the query, the logs in the result table are not grouped according to the workspace from which they were retrieved.
+
+#### Increase wait time, include statistics, include render (visualization)
+
+The `LogsQueryOptions` type is used for advanced logs options.
+
+* By default, your query will run for up to three minutes. To increase the default timeout, set `LogsQueryOptions.Wait` to the desired number of seconds. The maximum wait time is 10 minutes (600 seconds).
+
+* To get logs query execution statistics, such as CPU and memory consumption, set `LogsQueryOptions.Statistics` to `true`.
+
+* To get visualization data for logs queries, set `LogsQueryOptions.Visualization` to `true`.
+
+```go
+azquery.LogsClientQueryWorkspaceOptions{
+	Options: &azquery.LogsQueryOptions{
+		Statistics:    to.Ptr(true),
+		Visualization: to.Ptr(true),
+		Wait:          to.Ptr(600),
+	},
+}
+```
+
+To do the same with `QueryBatch`, set the values in the `BatchQueryRequest.Headers` map with a key of "prefer", or use the `NewBatchQueryRequest` method.
+
+## Examples
+
+Get started with our [examples][azquery_pkg_go_samples].
+
+* For the majority of log queries, use the `LogsClient.QueryWorkspace` or the `LogsClient.QueryResource` method. Only use the `LogsClient.QueryBatch` method in advanced scenarios.
+ +* Use `MetricsClient.QueryResource` for metric queries. + +## Troubleshooting + +See our [troubleshooting guide][troubleshooting_guide] for details on how to diagnose various failure scenarios. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a [Contributor License Agreement (CLA)][cla] declaring that you have the right to, and actually do, grant us the rights to use your contribution. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate +the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to +do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information, see +the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or +comments. + + +[azquery_repo]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/monitor/azquery +[azquery_pkg_go]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery +[azquery_pkg_go_docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#section-documentation +[azquery_pkg_go_samples]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#pkg-examples +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_sub]: https://azure.microsoft.com/free/ +[azure_monitor_overview]: https://learn.microsoft.com/azure/azure-monitor/overview +[context]: https://pkg.go.dev/context +[cloud_documentation]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +[default_cred_ref]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential +[example_logs_client]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#example-NewLogsClient +[example_metrics_client]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#example-NewMetricsClient +[go_samples]: (https://github.com/Azure-Samples/azure-sdk-for-go-samples) +[kusto_query_language]: https://learn.microsoft.com/azure/data-explorer/kusto/query/ +[kusto_to_sql]: https://learn.microsoft.com/azure/data-explorer/kusto/query/sqlcheatsheet +[log_analytics_workspace]: https://learn.microsoft.com/azure/azure-monitor/logs/log-analytics-workspace-overview +[log_analytics_workspace_create]: https://learn.microsoft.com/azure/azure-monitor/logs/quick-create-workspace +[logs_overview]: https://learn.microsoft.com/azure/azure-monitor/logs/data-platform-logs +[metrics_overview]: https://learn.microsoft.com/azure/azure-monitor/essentials/data-platform-metrics +[monitor_docs]: https://learn.microsoft.com/azure/azure-monitor/ +[monitor_rest_docs]: https://learn.microsoft.com/rest/api/monitor/ +[multi-metrics]: https://learn.microsoft.com/azure/azure-monitor/essentials/data-platform-metrics#multi-dimensional-metrics +[service_limits]: https://learn.microsoft.com/azure/azure-monitor/service-limits#la-query-api +[troubleshooting_guide]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/monitor/azquery/TROUBLESHOOTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com \ No newline at end of file diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/TROUBLESHOOTING.md
new file mode 100644
index 00000000000..b3eaedd89e3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/TROUBLESHOOTING.md
@@ -0,0 +1,177 @@
+# Troubleshooting Azure Monitor Query client library issues
+
+This troubleshooting guide contains instructions to diagnose frequently encountered issues while using the Azure
+Monitor Query client library for Go.
+
+## Table of contents
+
+* [General Troubleshooting](#general-troubleshooting)
+  * [Error Handling](#error-handling)
+  * [Logging](#logging)
+  * [Troubleshooting authentication issues with logs and metrics query requests](#authentication-errors)
+* [Troubleshooting Logs Query](#troubleshooting-logs-query)
+  * [Troubleshooting authorization errors](#troubleshooting-authorization-errors-for-logs-query)
+  * [Troubleshooting invalid Kusto query](#troubleshooting-invalid-kusto-query)
+  * [Troubleshooting empty log query results](#troubleshooting-empty-log-query-results)
+  * [Troubleshooting server timeouts when executing logs query request](#troubleshooting-server-timeouts-when-executing-logs-query-request)
+* [Troubleshooting Metrics Query](#troubleshooting-metrics-query)
+  * [Troubleshooting authorization errors](#troubleshooting-authorization-errors-for-metrics-query)
+  * [Troubleshooting unsupported granularity for metrics query](#troubleshooting-unsupported-granularity-for-metrics-query)
+
+## General Troubleshooting
+
+### Error Handling
+
+All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Monitor Query.
+
+For Logs, an error may also be returned in the response's `ErrorInfo` struct, usually to indicate a partial error from the service.
+
+### Logging
+
+This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default, the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout:
+
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// Print log events to stdout
+azlog.SetListener(func(cls azlog.Event, msg string) {
+	fmt.Println(msg)
+})
+
+// Includes only requests and responses in credential logs
+azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
+```
+
+### Authentication errors
+
+Azure Monitor Query supports Azure Active Directory authentication. Both LogsClient and
+MetricsClient take in a `credential` as a parameter in their constructors. To provide a valid credential, you can use
+the `azidentity` package. For more details on getting started, refer to
+the [README][readme_authentication]
+of the Azure Monitor Query library. For details on the credential types supported in `azidentity`, see the [Azure Identity library's documentation][azidentity_docs].
+
+For more help with troubleshooting authentication errors, see the Azure Identity client library [troubleshooting guide][azidentity_troubleshooting].
+
+## Troubleshooting Logs Query
+
+### Troubleshooting authorization errors for logs query
+
+If you get an HTTP error with status code 403 (Forbidden), it means that the provided credentials do not have
+sufficient permissions to query the workspace.
+```text
+{"error":{"message":"The provided credentials have insufficient access to perform the requested operation","code":"InsufficientAccessError","correlationId":""}}
+```
+
+1. Check that the application or user that is making the request has sufficient permissions:
+   * You can refer to this document to [manage access to workspaces][workspace_access]
+2. If the user or application is granted sufficient privileges to query the workspace, make sure you are
+   authenticating as that user/application. If you are authenticating using the
+   [DefaultAzureCredential][default_azure_cred]
+   then check the logs to verify that the credential used is the one you expected. To enable logging, see the [enable
+   client logging](#logging) section above.
+
+For more help with troubleshooting authentication errors, see the Azure Identity client library [troubleshooting guide][azidentity_troubleshooting].
+
+### Troubleshooting invalid Kusto query
+
+If you get an HTTP error with status code 400 (Bad Request), you may have an error in your Kusto query and you'll
+see an error message similar to the one below.
+
+```text
+{"error":{"message":"The request had some invalid properties","code":"BadArgumentError","correlationId":"","innererror":{"code":"SyntaxError","message":"A recognition error occurred in the query.","innererror":{"code":"SYN0002","message":"Query could not be parsed at 'joi' on line [2,244]","line":2,"pos":244,"token":"joi"}}}}
+```
+
+The error message in `innererror` may include the location where the Kusto query has an error plus further details. You may also refer to the [Kusto Query Language][kusto] reference docs to learn more about querying logs using KQL.
+
+### Troubleshooting empty log query results
+
+If your Kusto query returns empty with no logs, please validate the following:
+
+- You have the right workspace ID or resource ID
+- You are setting the correct time interval for the query. Try lengthening the time interval for your query to see if that
+  returns any results.
+- If your Kusto query also has a time interval, the query is evaluated for the intersection of the time interval in the
+  query string and the time interval set in the `Body.Timespan` field of the request query. The intersection of
+  these time intervals may not have any logs. To avoid any confusion, it's recommended to remove any time interval in
+  the Kusto query string and use `Body.Timespan` explicitly.
+- Your workspace or resource actually has logs to query. Sometimes, especially with newly created resources,
+  there are no logs yet to query.
+
+### Troubleshooting server timeouts when executing logs query request
+
+Some complex Kusto queries can take a long time to complete. These queries are aborted by the service if they run for more than 3 minutes. For such scenarios, the query APIs on LogsClient provide options to configure the timeout on the server. The server timeout can be extended up to 10 minutes.
+
+You may see an error as follows:
+
+```
+Code: GatewayTimeout
+Message: Gateway timeout
+Inner error: {
+	"code": "GatewayTimeout",
+	"message": "Unable to unzip response"
+}
+```
+
+The following code shows an example of setting the server timeout. By setting this server timeout, the Azure Monitor Query library will automatically extend the client timeout to wait for 10 minutes for the server to respond.
+
+```go
+workspaceID := ""
+options := &azquery.LogsClientQueryWorkspaceOptions{
+	Options: &azquery.LogsQueryOptions{
+		Wait: to.Ptr(600), // increases wait time to ten minutes
+	},
+}
+
+res, err := logsClient.QueryWorkspace(context.Background(),
+	workspaceID,
+	azquery.Body{Query: to.Ptr(`AzureActivity
+| summarize Count = count() by ResourceGroup
+| top 10 by Count
+| project ResourceGroup`)},
+	options)
+if err != nil {
+	//TODO: handle error
+}
+_ = res
+```
+
+## Troubleshooting Metrics Query
+
+### Troubleshooting authorization errors for metrics query
+
+If you get an HTTP error with status code 403 (Forbidden), it means that the provided credentials do not have
+sufficient permissions to query the resource.
+```text
+{"error":{"code":"AuthorizationFailed","message":"The client '71d56230-5920-4856-8f33-c030b269d870' with object id '71d56230-5920-4856-8f33-c030b269d870' does not have authorization to perform action 'microsoft.insights/metrics/read' over scope '/subscriptions/faa080af-c1d8-40ad-9cce-e1a450ca5b57/resourceGroups/srnagar-azuresdkgroup/providers/Microsoft.CognitiveServices/accounts/srnagara-textanalytics/providers/microsoft.insights' or the scope is invalid. If access was recently granted, please refresh your credentials."}}
+```
+
+1. Check that the application or user that is making the request has sufficient permissions.
+2. If the user or application is granted sufficient privileges to query the resource, make sure you are
+   authenticating as that user/application. If you are authenticating using the
+   [DefaultAzureCredential][default_azure_cred]
+   then check the logs to verify that the credential used is the one you expected. To enable logging, see the [enable
+   client logging](#logging) section above.
+
+For more help with troubleshooting authentication errors, see the Azure Identity client library [troubleshooting guide][azidentity_troubleshooting].
+
+### Troubleshooting unsupported granularity for metrics query
+
+If you notice the following exception, this is due to an invalid time granularity in the metrics query request. Your
+query might look something like the following where `MetricsClientQueryResourceOptions.Interval` is set to an unsupported
+duration.
+
+```text
+{"code":"BadRequest","message":"Invalid time grain duration: PT10M, supported ones are: 00:01:00,00:05:00,00:15:00,00:30:00,01:00:00,06:00:00,12:00:00,1.00:00:00"}
+```
+
+As documented in the error message, the supported granularities for metrics queries are 1 minute, 5 minutes, 15 minutes,
+30 minutes, 1 hour, 6 hours, 12 hours and 1 day.
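+
+As a sketch, the same query with a supported one-minute grain (assuming an existing `azquery.MetricsClient` named `client` and a `resourceURI`, as in the package samples) might look like:
+
+```go
+res, err := client.QueryResource(context.TODO(), resourceURI,
+	&azquery.MetricsClientQueryResourceOptions{
+		Interval: to.Ptr("PT1M"), // one minute, a supported time grain
+	})
+if err != nil {
+	//TODO: handle error
+}
+_ = res
+```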
+ + +[azidentity_docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azidentity_troubleshooting]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/TROUBLESHOOTING.md +[default_azure_cred]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential +[kusto]: https://learn.microsoft.com/azure/data-explorer/kusto/query +[readme_authentication]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/monitor/azquery#authentication +[workspace_access]: https://learn.microsoft.com/azure/azure-monitor/logs/manage-access#manage-access-using-workspace-permissions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/assets.json new file mode 100644 index 00000000000..555e2357a7f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/monitor/azquery", + "Tag": "go/monitor/azquery_4e1ec7fc44" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/autorest.md new file mode 100644 index 00000000000..d2b07a06316 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/autorest.md @@ -0,0 +1,206 @@ +## Go + +``` yaml +title: MonitorQueryClient +description: Azure Monitor Query Go Client +generated-metadata: false + +clear-output-folder: false +export-clients: true +go: true +input-file: + - https://github.com/Azure/azure-rest-api-specs/blob/72427ef3ff5875bd8409ef112ef5e6f3cf2b8795/specification/operationalinsights/data-plane/Microsoft.OperationalInsights/stable/2022-10-27/OperationalInsights.json + - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/stable/2018-01-01/metricDefinitions_API.json + - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/stable/2018-01-01/metrics_API.json + - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/preview/2017-12-01-preview/metricNamespaces_API.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery +openapi-type: "data-plane" +output-folder: ../azquery +override-client-name: LogsClient +security: "AADToken" +use: "@autorest/go@4.0.0-preview.46" +version: "^3.0.0" + +directive: + # delete extra endpoints + - from: swagger-document + where: $["paths"] + transform: > + delete $["/workspaces/{workspaceId}/metadata"]; + - from: swagger-document + where: $["x-ms-paths"] + transform: > + delete $["/{resourceId}/query?disambiguation_dummy"]; + + # delete extra operations + - remove-operation: Metadata_Post + - remove-operation: Metadata_Get + - remove-operation: Query_Get + - remove-operation: Query_ResourceGet + - remove-operation: Query_ResourceExecuteXms + - remove-operation: Query_ResourceGetXms + + # delete metadata models + - remove-model: metadataResults + - remove-model: metadataCategory + - remove-model: metadataSolution + - remove-model: metadataResourceType + - remove-model: metadataTable + - remove-model: metadataFunction + - remove-model: metadataQuery + - remove-model: 
metadataApplication + - remove-model: metadataWorkspace + - remove-model: metadataResource + - remove-model: metadataPermissions + + # rename operations to generate into metrics and logs clients + - rename-operation: + from: Query_Execute + to: Logs_QueryWorkspace + - rename-operation: + from: Query_Batch + to: Logs_QueryBatch + - rename-operation: + from: Query_ResourceExecute + to: Logs_QueryResource + - rename-operation: + from: Metrics_List + to: Metrics_QueryResource + - rename-operation: + from: MetricDefinitions_List + to: Metrics_ListDefinitions + - rename-operation: + from: MetricNamespaces_List + to: Metrics_ListNamespaces + + # rename some metrics fields + - from: swagger-document + where: $.definitions.Metric.properties.timeseries + transform: $["x-ms-client-name"] = "TimeSeries" + - from: swagger-document + where: $.definitions.TimeSeriesElement.properties.metadatavalues + transform: $["x-ms-client-name"] = "MetadataValues" + - from: swagger-document + where: $.definitions.Response.properties.resourceregion + transform: $["x-ms-client-name"] = "ResourceRegion" + - from: swagger-document + where: $.parameters.MetricNamespaceParameter + transform: $["x-ms-client-name"] = "MetricNamespace" + - from: swagger-document + where: $.parameters.MetricNamesParameter + transform: $["x-ms-client-name"] = "MetricNames" + - from: swagger-document + where: $.parameters.OrderByParameter + transform: $["x-ms-client-name"] = "OrderBy" + + # rename Body.Workspaces to Body.AdditionalWorkspaces + - from: swagger-document + where: $.definitions.queryBody.properties.workspaces + transform: $["x-ms-client-name"] = "AdditionalWorkspaces" + + # rename Render to Visualization + - from: swagger-document + where: $.definitions.queryResults.properties.render + transform: $["x-ms-client-name"] = "Visualization" + - from: swagger-document + where: $.definitions.batchQueryResults.properties.render + transform: $["x-ms-client-name"] = "Visualization" + + # rename BatchQueryRequest.ID to BatchQueryRequest.CorrelationID + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.id + transform: $["x-ms-client-name"] = "CorrelationID" + - from: swagger-document + where: $.definitions.batchQueryResponse.properties.id + transform: $["x-ms-client-name"] = "CorrelationID" + + # rename BatchQueryRequest.Workspace to BatchQueryRequest.WorkspaceID + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.workspace + transform: $["x-ms-client-name"] = "WorkspaceID" + + # rename Prefer to Options + - from: swagger-document + where: $.parameters.PreferHeaderParameter + transform: $["x-ms-client-name"] = "Options" + - from: models.go + where: $ + transform: return $.replace(/Options \*string/g, "Options *LogsQueryOptions"); + - from: logs_client.go + where: $ + transform: return $.replace(/\*options\.Options/g, "options.Options.preferHeader()"); + + # add default values for batch request path and method attributes + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.path + transform: $["x-ms-client-default"] = "/query" + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.method + transform: $["x-ms-client-default"] = "POST" + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.path.x-ms-enum + transform: $["modelAsString"] = true + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.path.x-ms-enum + transform: $["name"] = "BatchQueryRequestPath" + - from: swagger-document + where: 
$.definitions.batchQueryRequest.properties.method.x-ms-enum + transform: $["modelAsString"] = true + - from: swagger-document + where: $.definitions.batchQueryRequest.properties.method.x-ms-enum + transform: $["name"] = "BatchQueryRequestMethod" + + # add descriptions for models and constants that don't have them + - from: constants.go + where: $ + transform: return $.replace(/type ResultType string/, "//ResultType - Reduces the set of data collected. The syntax allowed depends on the operation. See the operation's description for details.\ntype ResultType string"); + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type (?:ErrorResponse|ErrorResponseAutoGenerated|ErrorInfo|ErrorDetail).+\{(?:\s.+\s)+\}\s/g, ""); + - from: models_serde.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:ErrorResponse|ErrorResponseAutoGenerated|ErrorInfo|ErrorDetail)\).*\{\s(?:.+\s)+\}\s/g, ""); + + # point the clients to the correct host url + - from: + - logs_client.go + - metrics_client.go + where: $ + transform: return $.replace(/host/g, "client.host"); + - from: + - logs_client.go + - metrics_client.go + where: $ + transform: return $.replace(/internal \*azcore.Client/g, "host string\n internal *azcore.Client"); + + # delete generated host url + - from: constants.go + where: $ + transform: return $.replace(/const host = "(.*?)"/, ""); + + # change Table.Rows from type [][]interface{} to type []Row + - from: models.go + where: $ + transform: return $.replace(/Rows \[\]\[\]byte/, "Rows []Row"); + + # change type of timespan from *string to *TimeInterval + - from: models.go + where: $ + transform: return $.replace(/Timespan \*string/g, "Timespan *TimeInterval"); + - from: metrics_client.go + where: $ + transform: return $.replace(/reqQP\.Set\(\"timespan\", \*options\.Timespan\)/g, "reqQP.Set(\"timespan\", string(*options.Timespan))"); + + # change type of MetricsClientQueryResourceOptions.Aggregation from *string to []*AggregationType + - from: models.go + where: $ + transform: return $.replace(/Aggregation \*string/g, "Aggregation []*AggregationType"); + - from: metrics_client.go + where: $ + transform: return $.replace(/\*options.Aggregation/g, "aggregationTypeToString(options.Aggregation)"); + - from: swagger-document + where: $.parameters.AggregationsParameter + transform: $["description"] = "The list of aggregation types to retrieve" \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/build.go new file mode 100644 index 00000000000..8634768d24b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md --rawjson-as-bytes +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azquery diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/ci.yml new file mode 100644 index 00000000000..1040bbb8248 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/ci.yml @@ -0,0 +1,35 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/monitor/azquery + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/monitor/azquery + + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'monitor/azquery' + RunLiveTests: true + SupportedClouds: 'Public,UsGov,China' + EnvVars: + AZURE_CLIENT_ID: $(AZQUERY_CLIENT_ID) + AZURE_TENANT_ID: $(AZQUERY_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZQUERY_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZQUERY_SUBSCRIPTION_ID) \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/cloud_config.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/cloud_config.go new file mode 100644 index 00000000000..110ad90e0e8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/cloud_config.go @@ -0,0 +1,42 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azquery + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + +// Cloud Service Names for Monitor Query Logs and Metrics, used to identify the respective cloud.ServiceConfiguration +const ( + ServiceNameLogs cloud.ServiceName = "azqueryLogs" + ServiceNameMetrics cloud.ServiceName = "azqueryMetrics" +) + +func init() { + cloud.AzureChina.Services[ServiceNameLogs] = cloud.ServiceConfiguration{ + Audience: "https://api.loganalytics.azure.cn", + Endpoint: "https://api.loganalytics.azure.cn/v1", + } + cloud.AzureGovernment.Services[ServiceNameLogs] = cloud.ServiceConfiguration{ + Audience: "https://api.loganalytics.us", + Endpoint: "https://api.loganalytics.us/v1", + } + cloud.AzurePublic.Services[ServiceNameLogs] = cloud.ServiceConfiguration{ + Audience: "https://api.loganalytics.io", + Endpoint: "https://api.loganalytics.io/v1", + } + cloud.AzureChina.Services[ServiceNameMetrics] = cloud.ServiceConfiguration{ + Audience: "https://management.chinacloudapi.cn/", + Endpoint: "https://management.chinacloudapi.cn/", + } + cloud.AzureGovernment.Services[ServiceNameMetrics] = cloud.ServiceConfiguration{ + Audience: "https://management.usgovcloudapi.net/", + Endpoint: "https://management.usgovcloudapi.net/", + } + cloud.AzurePublic.Services[ServiceNameMetrics] = cloud.ServiceConfiguration{ + Audience: "https://management.azure.com", + Endpoint: "https://management.azure.com", + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/constants.go new file mode 100644 index 00000000000..93306a2a8ff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/constants.go @@ -0,0 +1,188 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azquery + +// AggregationType - the aggregation type of the metric. 
+type AggregationType string + +const ( + AggregationTypeNone AggregationType = "None" + AggregationTypeAverage AggregationType = "Average" + AggregationTypeCount AggregationType = "Count" + AggregationTypeMinimum AggregationType = "Minimum" + AggregationTypeMaximum AggregationType = "Maximum" + AggregationTypeTotal AggregationType = "Total" +) + +// PossibleAggregationTypeValues returns the possible values for the AggregationType const type. +func PossibleAggregationTypeValues() []AggregationType { + return []AggregationType{ + AggregationTypeNone, + AggregationTypeAverage, + AggregationTypeCount, + AggregationTypeMinimum, + AggregationTypeMaximum, + AggregationTypeTotal, + } +} + +// BatchQueryRequestMethod - The method of a single request in a batch, defaults to POST +type BatchQueryRequestMethod string + +const ( + BatchQueryRequestMethodPOST BatchQueryRequestMethod = "POST" +) + +// PossibleBatchQueryRequestMethodValues returns the possible values for the BatchQueryRequestMethod const type. +func PossibleBatchQueryRequestMethodValues() []BatchQueryRequestMethod { + return []BatchQueryRequestMethod{ + BatchQueryRequestMethodPOST, + } +} + +// BatchQueryRequestPath - The query path of a single request in a batch, defaults to /query +type BatchQueryRequestPath string + +const ( + BatchQueryRequestPathQuery BatchQueryRequestPath = "/query" +) + +// PossibleBatchQueryRequestPathValues returns the possible values for the BatchQueryRequestPath const type. +func PossibleBatchQueryRequestPathValues() []BatchQueryRequestPath { + return []BatchQueryRequestPath{ + BatchQueryRequestPathQuery, + } +} + +// LogsColumnType - The data type of this column. +type LogsColumnType string + +const ( + LogsColumnTypeBool LogsColumnType = "bool" + LogsColumnTypeDatetime LogsColumnType = "datetime" + LogsColumnTypeDecimal LogsColumnType = "decimal" + LogsColumnTypeDynamic LogsColumnType = "dynamic" + LogsColumnTypeGUID LogsColumnType = "guid" + LogsColumnTypeInt LogsColumnType = "int" + LogsColumnTypeLong LogsColumnType = "long" + LogsColumnTypeReal LogsColumnType = "real" + LogsColumnTypeString LogsColumnType = "string" + LogsColumnTypeTimespan LogsColumnType = "timespan" +) + +// PossibleLogsColumnTypeValues returns the possible values for the LogsColumnType const type. +func PossibleLogsColumnTypeValues() []LogsColumnType { + return []LogsColumnType{ + LogsColumnTypeBool, + LogsColumnTypeDatetime, + LogsColumnTypeDecimal, + LogsColumnTypeDynamic, + LogsColumnTypeGUID, + LogsColumnTypeInt, + LogsColumnTypeLong, + LogsColumnTypeReal, + LogsColumnTypeString, + LogsColumnTypeTimespan, + } +} + +// MetricClass - The class of the metric. +type MetricClass string + +const ( + MetricClassAvailability MetricClass = "Availability" + MetricClassErrors MetricClass = "Errors" + MetricClassLatency MetricClass = "Latency" + MetricClassSaturation MetricClass = "Saturation" + MetricClassTransactions MetricClass = "Transactions" +) + +// PossibleMetricClassValues returns the possible values for the MetricClass const type. +func PossibleMetricClassValues() []MetricClass { + return []MetricClass{ + MetricClassAvailability, + MetricClassErrors, + MetricClassLatency, + MetricClassSaturation, + MetricClassTransactions, + } +} + +// MetricUnit - The unit of the metric. 
+type MetricUnit string + +const ( + MetricUnitBitsPerSecond MetricUnit = "BitsPerSecond" + MetricUnitByteSeconds MetricUnit = "ByteSeconds" + MetricUnitBytes MetricUnit = "Bytes" + MetricUnitBytesPerSecond MetricUnit = "BytesPerSecond" + MetricUnitCores MetricUnit = "Cores" + MetricUnitCount MetricUnit = "Count" + MetricUnitCountPerSecond MetricUnit = "CountPerSecond" + MetricUnitMilliCores MetricUnit = "MilliCores" + MetricUnitMilliSeconds MetricUnit = "MilliSeconds" + MetricUnitNanoCores MetricUnit = "NanoCores" + MetricUnitPercent MetricUnit = "Percent" + MetricUnitSeconds MetricUnit = "Seconds" + MetricUnitUnspecified MetricUnit = "Unspecified" +) + +// PossibleMetricUnitValues returns the possible values for the MetricUnit const type. +func PossibleMetricUnitValues() []MetricUnit { + return []MetricUnit{ + MetricUnitBitsPerSecond, + MetricUnitByteSeconds, + MetricUnitBytes, + MetricUnitBytesPerSecond, + MetricUnitCores, + MetricUnitCount, + MetricUnitCountPerSecond, + MetricUnitMilliCores, + MetricUnitMilliSeconds, + MetricUnitNanoCores, + MetricUnitPercent, + MetricUnitSeconds, + MetricUnitUnspecified, + } +} + +// NamespaceClassification - Kind of namespace +type NamespaceClassification string + +const ( + NamespaceClassificationCustom NamespaceClassification = "Custom" + NamespaceClassificationPlatform NamespaceClassification = "Platform" + NamespaceClassificationQos NamespaceClassification = "Qos" +) + +// PossibleNamespaceClassificationValues returns the possible values for the NamespaceClassification const type. +func PossibleNamespaceClassificationValues() []NamespaceClassification { + return []NamespaceClassification{ + NamespaceClassificationCustom, + NamespaceClassificationPlatform, + NamespaceClassificationQos, + } +} + +// ResultType - Reduces the set of data collected. The syntax allowed depends on the operation. See the operation's description for details. +type ResultType string + +const ( + ResultTypeData ResultType = "Data" + ResultTypeMetadata ResultType = "Metadata" +) + +// PossibleResultTypeValues returns the possible values for the ResultType const type. +func PossibleResultTypeValues() []ResultType { + return []ResultType{ + ResultTypeData, + ResultTypeMetadata, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/custom_client.go new file mode 100644 index 00000000000..90c62987742 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/custom_client.go @@ -0,0 +1,196 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azquery + +// this file contains handwritten additions to the generated code + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// MetricsClientOptions contains optional settings for MetricsClient. +type MetricsClientOptions struct { + azcore.ClientOptions +} + +// LogsClientOptions contains optional settings for LogsClient. +type LogsClientOptions struct { + azcore.ClientOptions +} + +// NewLogsClient creates a client that accesses Azure Monitor logs data. 
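+//
+// A minimal construction sketch (assumes an azidentity credential; error
+// handling elided). For sovereign clouds, point the client options' Cloud
+// field at the matching configuration registered in cloud_config.go:
+//
+//	cred, _ := azidentity.NewDefaultAzureCredential(nil)
+//	client, _ := azquery.NewLogsClient(cred, nil) // Azure public cloud
+//
+//	opts := &azquery.LogsClientOptions{}
+//	opts.Cloud = cloud.AzureGovernment
+//	govClient, _ := azquery.NewLogsClient(cred, opts)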
+func NewLogsClient(credential azcore.TokenCredential, options *LogsClientOptions) (*LogsClient, error) { + if options == nil { + options = &LogsClientOptions{} + } + if reflect.ValueOf(options.Cloud).IsZero() { + options.Cloud = cloud.AzurePublic + } + c, ok := options.Cloud.Services[ServiceNameLogs] + if !ok || c.Audience == "" || c.Endpoint == "" { + return nil, errors.New("provided Cloud field is missing Azure Monitor Logs configuration") + } + + authPolicy := runtime.NewBearerTokenPolicy(credential, []string{c.Audience + "/.default"}, nil) + azcoreClient, err := azcore.NewClient("azquery.LogsClient", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &LogsClient{host: c.Endpoint, internal: azcoreClient}, nil +} + +// NewMetricsClient creates a client that accesses Azure Monitor metrics data. +func NewMetricsClient(credential azcore.TokenCredential, options *MetricsClientOptions) (*MetricsClient, error) { + if options == nil { + options = &MetricsClientOptions{} + } + if reflect.ValueOf(options.Cloud).IsZero() { + options.Cloud = cloud.AzurePublic + } + c, ok := options.Cloud.Services[ServiceNameMetrics] + if !ok || c.Audience == "" || c.Endpoint == "" { + return nil, errors.New("provided Cloud field is missing Azure Monitor Metrics configuration") + } + + authPolicy := runtime.NewBearerTokenPolicy(credential, []string{c.Audience + "/.default"}, nil) + azcoreClient, err := azcore.NewClient("azquery.MetricsClient", version, runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}, &options.ClientOptions) + if err != nil { + return nil, err + } + return &MetricsClient{host: c.Endpoint, internal: azcoreClient}, nil +} + +// ErrorInfo - The code and message for an error. +type ErrorInfo struct { + // REQUIRED; A machine readable error code. + Code string + + // full error message detailing why the operation failed. + data []byte +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ErrorInfo. +func (e *ErrorInfo) UnmarshalJSON(data []byte) error { + e.data = data + ei := struct{ Code string }{} + if err := json.Unmarshal(data, &ei); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + e.Code = ei.Code + + return nil +} + +// Error implements a custom error for type ErrorInfo. +func (e *ErrorInfo) Error() string { + return string(e.data) +} + +// Row of data in a table, types of data used by service specified in LogsColumnType +type Row []any + +// TimeInterval specifies the time range over which to query. +// Use NewTimeInterval() for help formatting. +// Follows the ISO8601 time interval standard with most common +// format being startISOTime/endISOTime. ISO8601 durations also supported (ex "PT2H" for last two hours). +// Use UTC for all times. +type TimeInterval string + +// NewTimeInterval creates a TimeInterval for use in a query. +// Use UTC for start and end times. +func NewTimeInterval(start time.Time, end time.Time) TimeInterval { + return TimeInterval(start.Format(time.RFC3339) + "/" + end.Format(time.RFC3339)) +} + +// Values returns the interval's start and end times if it's in the format startISOTime/endISOTime, else it will return an error. 
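+//
+// For example (a sketch with arbitrary times):
+//
+//	interval := azquery.NewTimeInterval(
+//		time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
+//		time.Date(2023, 1, 2, 0, 0, 0, 0, time.UTC))
+//	start, end, err := interval.Values() // recovers both times, err == nil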
+func (i TimeInterval) Values() (time.Time, time.Time, error) {
+	// split into different start and end times
+	times := strings.Split(string(i), "/")
+	if len(times) != 2 {
+		return time.Time{}, time.Time{}, errors.New("time interval should be in format startISOTime/endISOTime")
+	}
+	start, err := time.Parse(time.RFC3339, times[0])
+	if err != nil {
+		return time.Time{}, time.Time{}, errors.New("error parsing start time")
+	}
+	end, err := time.Parse(time.RFC3339, times[1])
+	if err != nil {
+		return time.Time{}, time.Time{}, errors.New("error parsing end time")
+	}
+	// return times
+	return start, end, nil
+}
+
+// LogsQueryOptions sets server timeout, query statistics and visualization information
+type LogsQueryOptions struct {
+	// Set Statistics to true to get logs query execution statistics,
+	// such as CPU and memory consumption. Defaults to false.
+	Statistics *bool
+
+	// Set Visualization to true to get visualization
+	// data for logs queries. Defaults to false.
+	Visualization *bool
+
+	// By default, the Azure Monitor Query service will run your
+	// query for up to three minutes. To increase the default timeout,
+	// set Wait to the desired number of seconds.
+	// Max wait time the service will allow is ten minutes (600 seconds).
+	Wait *int
+}
+
+// preferHeader converts LogsQueryOptions from a struct to a properly formatted string
+// to be used in the request Prefer header
+func (l LogsQueryOptions) preferHeader() string {
+	var options []string
+	if l.Statistics != nil && *l.Statistics {
+		options = append(options, "include-statistics=true")
+	}
+	if l.Visualization != nil && *l.Visualization {
+		options = append(options, "include-render=true")
+	}
+	if l.Wait != nil {
+		options = append(options, fmt.Sprintf("wait=%d", *l.Wait))
+	}
+	return strings.Join(options, ",")
+}
+
+// NewBatchQueryRequest creates a new BatchQueryRequest.
+func NewBatchQueryRequest(workspaceID string, query string, timespan TimeInterval, correlationID string, options LogsQueryOptions) BatchQueryRequest {
+	var optionsMap map[string]*string
+	if options.Statistics != nil || options.Visualization != nil || options.Wait != nil {
+		optionsMap = make(map[string]*string)
+		optionsString := options.preferHeader()
+		optionsMap["prefer"] = &optionsString
+	}
+
+	return BatchQueryRequest{
+		Body:          &Body{Query: &query, Timespan: &timespan},
+		CorrelationID: &correlationID,
+		WorkspaceID:   &workspaceID,
+		Headers:       optionsMap,
+	}
+}
+
+// aggregationTypeToString converts []*AggregationType to string, so the values can be sent
+// in MetricsClient.QueryResource
+func aggregationTypeToString(aggregations []*AggregationType) string {
+	var s []string
+	for _, aggregation := range aggregations {
+		s = append(s, string(*aggregation))
+	}
+	return strings.Join(s, ",")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/logs_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/logs_client.go
new file mode 100644
index 00000000000..d12a59eda26
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/logs_client.go
@@ -0,0 +1,168 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+// DO NOT EDIT.
+ +package azquery + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// LogsClient contains the methods for the Logs group. +// Don't use this type directly, use a constructor function instead. +type LogsClient struct { + host string + internal *azcore.Client +} + +// QueryBatch - Executes a batch of Analytics queries for data. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/batch-queries] +// is an example for using POST with an Analytics query. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-10-27 +// - body - The batch request body +// - options - LogsClientQueryBatchOptions contains the optional parameters for the LogsClient.QueryBatch method. +func (client *LogsClient) QueryBatch(ctx context.Context, body BatchRequest, options *LogsClientQueryBatchOptions) (LogsClientQueryBatchResponse, error) { + req, err := client.queryBatchCreateRequest(ctx, body, options) + if err != nil { + return LogsClientQueryBatchResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LogsClientQueryBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LogsClientQueryBatchResponse{}, runtime.NewResponseError(resp) + } + return client.queryBatchHandleResponse(resp) +} + +// queryBatchCreateRequest creates the QueryBatch request. +func (client *LogsClient) queryBatchCreateRequest(ctx context.Context, body BatchRequest, options *LogsClientQueryBatchOptions) (*policy.Request, error) { + urlPath := "/$batch" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// queryBatchHandleResponse handles the QueryBatch response. +func (client *LogsClient) queryBatchHandleResponse(resp *http.Response) (LogsClientQueryBatchResponse, error) { + result := LogsClientQueryBatchResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BatchResponse); err != nil { + return LogsClientQueryBatchResponse{}, err + } + return result, nil +} + +// QueryResource - Executes an Analytics query for data in the context of a resource. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/azure-resource-queries] +// is an example for using POST with an Analytics +// query. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-10-27 +// - resourceID - The identifier of the resource. +// - body - The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/] +// - options - LogsClientQueryResourceOptions contains the optional parameters for the LogsClient.QueryResource method. 
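+//
+// A minimal usage sketch (the resource ID is a placeholder; error handling elided):
+//
+//	res, _ := client.QueryResource(context.Background(),
+//		"/subscriptions/<subscription_id>/resourceGroups/<group>/providers/<provider>/<resource>",
+//		azquery.Body{Query: to.Ptr("AzureActivity | take 10")},
+//		nil)
+//	_ = res.Tables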
+func (client *LogsClient) QueryResource(ctx context.Context, resourceID string, body Body, options *LogsClientQueryResourceOptions) (LogsClientQueryResourceResponse, error) { + req, err := client.queryResourceCreateRequest(ctx, resourceID, body, options) + if err != nil { + return LogsClientQueryResourceResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LogsClientQueryResourceResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LogsClientQueryResourceResponse{}, runtime.NewResponseError(resp) + } + return client.queryResourceHandleResponse(resp) +} + +// queryResourceCreateRequest creates the QueryResource request. +func (client *LogsClient) queryResourceCreateRequest(ctx context.Context, resourceID string, body Body, options *LogsClientQueryResourceOptions) (*policy.Request, error) { + urlPath := "/{resourceId}/query" + urlPath = strings.ReplaceAll(urlPath, "{resourceId}", resourceID) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + if options != nil && options.Options != nil { + req.Raw().Header["Prefer"] = []string{options.Options.preferHeader()} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// queryResourceHandleResponse handles the QueryResource response. +func (client *LogsClient) queryResourceHandleResponse(resp *http.Response) (LogsClientQueryResourceResponse, error) { + result := LogsClientQueryResourceResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Results); err != nil { + return LogsClientQueryResourceResponse{}, err + } + return result, nil +} + +// QueryWorkspace - Executes an Analytics query for data. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/request-format] +// is an example for using POST with an Analytics query. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-10-27 +// - workspaceID - Primary Workspace ID of the query. This is the Workspace ID from the Properties blade in the Azure portal. +// - body - The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/] +// - options - LogsClientQueryWorkspaceOptions contains the optional parameters for the LogsClient.QueryWorkspace method. +func (client *LogsClient) QueryWorkspace(ctx context.Context, workspaceID string, body Body, options *LogsClientQueryWorkspaceOptions) (LogsClientQueryWorkspaceResponse, error) { + req, err := client.queryWorkspaceCreateRequest(ctx, workspaceID, body, options) + if err != nil { + return LogsClientQueryWorkspaceResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LogsClientQueryWorkspaceResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LogsClientQueryWorkspaceResponse{}, runtime.NewResponseError(resp) + } + return client.queryWorkspaceHandleResponse(resp) +} + +// queryWorkspaceCreateRequest creates the QueryWorkspace request. 
+func (client *LogsClient) queryWorkspaceCreateRequest(ctx context.Context, workspaceID string, body Body, options *LogsClientQueryWorkspaceOptions) (*policy.Request, error) { + urlPath := "/workspaces/{workspaceId}/query" + if workspaceID == "" { + return nil, errors.New("parameter workspaceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{workspaceId}", url.PathEscape(workspaceID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + if options != nil && options.Options != nil { + req.Raw().Header["Prefer"] = []string{options.Options.preferHeader()} + } + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, body) +} + +// queryWorkspaceHandleResponse handles the QueryWorkspace response. +func (client *LogsClient) queryWorkspaceHandleResponse(resp *http.Response) (LogsClientQueryWorkspaceResponse, error) { + result := LogsClientQueryWorkspaceResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Results); err != nil { + return LogsClientQueryWorkspaceResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/metrics_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/metrics_client.go new file mode 100644 index 00000000000..093e93f6190 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/metrics_client.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azquery + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" +) + +// MetricsClient contains the methods for the Metrics group. +// Don't use this type directly, use a constructor function instead. +type MetricsClient struct { + host string + internal *azcore.Client +} + +// NewListDefinitionsPager - Lists the metric definitions for the resource. +// +// Generated from API version 2018-01-01 +// - resourceURI - The identifier of the resource. +// - options - MetricsClientListDefinitionsOptions contains the optional parameters for the MetricsClient.NewListDefinitionsPager +// method. 
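+//
+// The returned pager follows the standard runtime.Pager pattern; a usage
+// sketch (the resource URI is a placeholder; error handling elided):
+//
+//	pager := client.NewListDefinitionsPager("<resource_uri>", nil)
+//	for pager.More() {
+//		page, _ := pager.NextPage(context.Background())
+//		for _, definition := range page.Value {
+//			_ = definition.Name // each entry is a *MetricDefinition
+//		}
+//	}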
+func (client *MetricsClient) NewListDefinitionsPager(resourceURI string, options *MetricsClientListDefinitionsOptions) *runtime.Pager[MetricsClientListDefinitionsResponse] { + return runtime.NewPager(runtime.PagingHandler[MetricsClientListDefinitionsResponse]{ + More: func(page MetricsClientListDefinitionsResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *MetricsClientListDefinitionsResponse) (MetricsClientListDefinitionsResponse, error) { + req, err := client.listDefinitionsCreateRequest(ctx, resourceURI, options) + if err != nil { + return MetricsClientListDefinitionsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return MetricsClientListDefinitionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return MetricsClientListDefinitionsResponse{}, runtime.NewResponseError(resp) + } + return client.listDefinitionsHandleResponse(resp) + }, + }) +} + +// listDefinitionsCreateRequest creates the ListDefinitions request. +func (client *MetricsClient) listDefinitionsCreateRequest(ctx context.Context, resourceURI string, options *MetricsClientListDefinitionsOptions) (*policy.Request, error) { + urlPath := "/{resourceUri}/providers/Microsoft.Insights/metricDefinitions" + urlPath = strings.ReplaceAll(urlPath, "{resourceUri}", resourceURI) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2018-01-01") + if options != nil && options.MetricNamespace != nil { + reqQP.Set("metricnamespace", *options.MetricNamespace) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDefinitionsHandleResponse handles the ListDefinitions response. +func (client *MetricsClient) listDefinitionsHandleResponse(resp *http.Response) (MetricsClientListDefinitionsResponse, error) { + result := MetricsClientListDefinitionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.MetricDefinitionCollection); err != nil { + return MetricsClientListDefinitionsResponse{}, err + } + return result, nil +} + +// NewListNamespacesPager - Lists the metric namespaces for the resource. +// +// Generated from API version 2017-12-01-preview +// - resourceURI - The identifier of the resource. +// - options - MetricsClientListNamespacesOptions contains the optional parameters for the MetricsClient.NewListNamespacesPager +// method. +func (client *MetricsClient) NewListNamespacesPager(resourceURI string, options *MetricsClientListNamespacesOptions) *runtime.Pager[MetricsClientListNamespacesResponse] { + return runtime.NewPager(runtime.PagingHandler[MetricsClientListNamespacesResponse]{ + More: func(page MetricsClientListNamespacesResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *MetricsClientListNamespacesResponse) (MetricsClientListNamespacesResponse, error) { + req, err := client.listNamespacesCreateRequest(ctx, resourceURI, options) + if err != nil { + return MetricsClientListNamespacesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return MetricsClientListNamespacesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return MetricsClientListNamespacesResponse{}, runtime.NewResponseError(resp) + } + return client.listNamespacesHandleResponse(resp) + }, + }) +} + +// listNamespacesCreateRequest creates the ListNamespaces request. 
+func (client *MetricsClient) listNamespacesCreateRequest(ctx context.Context, resourceURI string, options *MetricsClientListNamespacesOptions) (*policy.Request, error) { + urlPath := "/{resourceUri}/providers/microsoft.insights/metricNamespaces" + urlPath = strings.ReplaceAll(urlPath, "{resourceUri}", resourceURI) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2017-12-01-preview") + if options != nil && options.StartTime != nil { + reqQP.Set("startTime", *options.StartTime) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listNamespacesHandleResponse handles the ListNamespaces response. +func (client *MetricsClient) listNamespacesHandleResponse(resp *http.Response) (MetricsClientListNamespacesResponse, error) { + result := MetricsClientListNamespacesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.MetricNamespaceCollection); err != nil { + return MetricsClientListNamespacesResponse{}, err + } + return result, nil +} + +// QueryResource - Lists the metric values for a resource. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-01-01 +// - resourceURI - The identifier of the resource. +// - options - MetricsClientQueryResourceOptions contains the optional parameters for the MetricsClient.QueryResource method. +func (client *MetricsClient) QueryResource(ctx context.Context, resourceURI string, options *MetricsClientQueryResourceOptions) (MetricsClientQueryResourceResponse, error) { + req, err := client.queryResourceCreateRequest(ctx, resourceURI, options) + if err != nil { + return MetricsClientQueryResourceResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return MetricsClientQueryResourceResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return MetricsClientQueryResourceResponse{}, runtime.NewResponseError(resp) + } + return client.queryResourceHandleResponse(resp) +} + +// queryResourceCreateRequest creates the QueryResource request. 
+func (client *MetricsClient) queryResourceCreateRequest(ctx context.Context, resourceURI string, options *MetricsClientQueryResourceOptions) (*policy.Request, error) {
+	urlPath := "/{resourceUri}/providers/Microsoft.Insights/metrics"
+	urlPath = strings.ReplaceAll(urlPath, "{resourceUri}", resourceURI)
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.Timespan != nil {
+		reqQP.Set("timespan", string(*options.Timespan))
+	}
+	if options != nil && options.Interval != nil {
+		reqQP.Set("interval", *options.Interval)
+	}
+	if options != nil && options.MetricNames != nil {
+		reqQP.Set("metricnames", *options.MetricNames)
+	}
+	if options != nil && options.Aggregation != nil {
+		reqQP.Set("aggregation", aggregationTypeToString(options.Aggregation))
+	}
+	if options != nil && options.Top != nil {
+		reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10))
+	}
+	if options != nil && options.OrderBy != nil {
+		reqQP.Set("orderby", *options.OrderBy)
+	}
+	if options != nil && options.Filter != nil {
+		reqQP.Set("$filter", *options.Filter)
+	}
+	if options != nil && options.ResultType != nil {
+		reqQP.Set("resultType", string(*options.ResultType))
+	}
+	reqQP.Set("api-version", "2018-01-01")
+	if options != nil && options.MetricNamespace != nil {
+		reqQP.Set("metricnamespace", *options.MetricNamespace)
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// queryResourceHandleResponse handles the QueryResource response.
+func (client *MetricsClient) queryResourceHandleResponse(resp *http.Response) (MetricsClientQueryResourceResponse, error) {
+	result := MetricsClientQueryResourceResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.Response); err != nil {
+		return MetricsClientQueryResourceResponse{}, err
+	}
+	return result, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models.go
new file mode 100644
index 00000000000..1fc49eeeb8f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models.go
@@ -0,0 +1,371 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+// DO NOT EDIT.
+
+package azquery
+
+import "time"
+
+// BatchQueryRequest - A single request in a batch.
+type BatchQueryRequest struct {
+	// REQUIRED; The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/]
+	Body *Body `json:"body,omitempty"`
+
+	// REQUIRED; Unique ID corresponding to each request in the batch.
+	CorrelationID *string `json:"id,omitempty"`
+
+	// REQUIRED; Primary Workspace ID of the query. This is the Workspace ID from the Properties blade in the Azure portal.
+	WorkspaceID *string `json:"workspace,omitempty"`
+
+	// Headers of the request. Can use prefer header to set server timeout and to query statistics and visualization information.
+	Headers map[string]*string `json:"headers,omitempty"`
+
+	// The method of a single request in a batch, defaults to POST
+	Method *BatchQueryRequestMethod `json:"method,omitempty"`
+
+	// The query path of a single request in a batch, defaults to /query
+	Path *BatchQueryRequestPath `json:"path,omitempty"`
+}
+
+// BatchQueryResponse - Contains the batch query response and the headers, id, and status of the request
+type BatchQueryResponse struct {
+	// Contains the tables, columns & rows resulting from a query.
+	Body          *BatchQueryResults `json:"body,omitempty"`
+	CorrelationID *string            `json:"id,omitempty"`
+
+	// Dictionary of
+	Headers map[string]*string `json:"headers,omitempty"`
+	Status  *int32             `json:"status,omitempty"`
+}
+
+// BatchQueryResults - Contains the tables, columns & rows resulting from a query.
+type BatchQueryResults struct {
+	// The code and message for an error.
+	Error *ErrorInfo `json:"error,omitempty"`
+
+	// Statistics represented in JSON format.
+	Statistics []byte `json:"statistics,omitempty"`
+
+	// The results of the query in tabular format.
+	Tables []*Table `json:"tables,omitempty"`
+
+	// Visualization data in JSON format.
+	Visualization []byte `json:"render,omitempty"`
+}
+
+// BatchRequest - An array of requests.
+type BatchRequest struct {
+	// REQUIRED; A single request in a batch.
+	Requests []*BatchQueryRequest `json:"requests,omitempty"`
+}
+
+// BatchResponse - Response to a batch query.
+type BatchResponse struct {
+	// An array of responses corresponding to each individual request in a batch.
+	Responses []*BatchQueryResponse `json:"responses,omitempty"`
+}
+
+// Body - The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/]
+type Body struct {
+	// REQUIRED; The query to execute.
+	Query *string `json:"query,omitempty"`
+
+	// A list of workspaces to query in addition to the primary workspace.
+	AdditionalWorkspaces []*string `json:"workspaces,omitempty"`
+
+	// Optional. The timespan over which to query data. This is an ISO8601 time period value. This timespan is applied in addition
+	// to any that are specified in the query expression.
+	Timespan *TimeInterval `json:"timespan,omitempty"`
+}
+
+// Column - A column in a table.
+type Column struct {
+	// The name of this column.
+	Name *string `json:"name,omitempty"`
+
+	// The data type of this column.
+	Type *LogsColumnType `json:"type,omitempty"`
+}
+
+// LocalizableString - The localizable string class.
+type LocalizableString struct {
+	// REQUIRED; the invariant value.
+	Value *string `json:"value,omitempty"`
+
+	// the locale specific value.
+	LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// LogsClientQueryBatchOptions contains the optional parameters for the LogsClient.QueryBatch method.
+type LogsClientQueryBatchOptions struct {
+	// placeholder for future optional parameters
+}
+
+// LogsClientQueryResourceOptions contains the optional parameters for the LogsClient.QueryResource method.
+type LogsClientQueryResourceOptions struct {
+	// Optional. The prefer header to set server timeout, query statistics and visualization information.
+	Options *LogsQueryOptions
+}
+
+// LogsClientQueryWorkspaceOptions contains the optional parameters for the LogsClient.QueryWorkspace method.
+type LogsClientQueryWorkspaceOptions struct {
+	// Optional. The prefer header to set server timeout, query statistics and visualization information.
+	Options *LogsQueryOptions
+}
+
+// MetadataValue - Represents a metric metadata value.
+type MetadataValue struct {
+	// the name of the metadata.
+	Name *LocalizableString `json:"name,omitempty"`
+
+	// the value of the metadata.
+	Value *string `json:"value,omitempty"`
+}
+
+// Metric - The result data of a query.
+type Metric struct {
+	// REQUIRED; the metric Id.
+	ID *string `json:"id,omitempty"`
+
+	// REQUIRED; the name and the display name of the metric, i.e. it is a localizable string.
+	Name *LocalizableString `json:"name,omitempty"`
+
+	// REQUIRED; the time series returned when a data query is performed.
+	TimeSeries []*TimeSeriesElement `json:"timeseries,omitempty"`
+
+	// REQUIRED; the resource type of the metric resource.
+	Type *string `json:"type,omitempty"`
+
+	// REQUIRED; The unit of the metric.
+	Unit *MetricUnit `json:"unit,omitempty"`
+
+	// Detailed description of this metric.
+	DisplayDescription *string `json:"displayDescription,omitempty"`
+
+	// 'Success' or the error details on query failures for this metric.
+	ErrorCode *string `json:"errorCode,omitempty"`
+
+	// Error message encountered querying this specific metric.
+	ErrorMessage *string `json:"errorMessage,omitempty"`
+}
+
+// MetricAvailability - Metric availability specifies the time grain (aggregation interval or frequency) and the retention
+// period for that time grain.
+type MetricAvailability struct {
+	// the retention period for the metric at the specified timegrain. Expressed as a duration 'PT1M', 'P1D', etc.
+	Retention *string `json:"retention,omitempty"`
+
+	// the time grain specifies the aggregation interval for the metric. Expressed as a duration 'PT1M', 'P1D', etc.
+	TimeGrain *string `json:"timeGrain,omitempty"`
+}
+
+// MetricDefinition - Metric definition class specifies the metadata for a metric.
+type MetricDefinition struct {
+	// Custom category name for this metric.
+	Category *string `json:"category,omitempty"`
+
+	// the name and the display name of the dimension, i.e. it is a localizable string.
+	Dimensions []*LocalizableString `json:"dimensions,omitempty"`
+
+	// Detailed description of this metric.
+	DisplayDescription *string `json:"displayDescription,omitempty"`
+
+	// the resource identifier of the metric definition.
+	ID *string `json:"id,omitempty"`
+
+	// Flag to indicate whether the dimension is required.
+	IsDimensionRequired *bool `json:"isDimensionRequired,omitempty"`
+
+	// the collection of what aggregation intervals are available to be queried.
+	MetricAvailabilities []*MetricAvailability `json:"metricAvailabilities,omitempty"`
+
+	// The class of the metric.
+	MetricClass *MetricClass `json:"metricClass,omitempty"`
+
+	// the name and the display name of the metric, i.e. it is a localizable string.
+	Name *LocalizableString `json:"name,omitempty"`
+
+	// the namespace the metric belongs to.
+	Namespace *string `json:"namespace,omitempty"`
+
+	// the primary aggregation type value defining how to use the values for display.
+	PrimaryAggregationType *AggregationType `json:"primaryAggregationType,omitempty"`
+
+	// the resource identifier of the resource that emitted the metric.
+	ResourceID *string `json:"resourceId,omitempty"`
+
+	// the collection of what aggregation types are supported.
+	SupportedAggregationTypes []*AggregationType `json:"supportedAggregationTypes,omitempty"`
+
+	// The unit of the metric.
+	Unit *MetricUnit `json:"unit,omitempty"`
+}
+
+// MetricDefinitionCollection - Represents collection of metric definitions.
+type MetricDefinitionCollection struct { + // REQUIRED; the values for the metric definitions. + Value []*MetricDefinition `json:"value,omitempty"` +} + +// MetricNamespace - Metric namespace class specifies the metadata for a metric namespace. +type MetricNamespace struct { + // Kind of namespace + Classification *NamespaceClassification `json:"classification,omitempty"` + + // The ID of the metric namespace. + ID *string `json:"id,omitempty"` + + // The escaped name of the namespace. + Name *string `json:"name,omitempty"` + + // Properties which include the fully qualified namespace name. + Properties *MetricNamespaceName `json:"properties,omitempty"` + + // The type of the namespace. + Type *string `json:"type,omitempty"` +} + +// MetricNamespaceCollection - Represents collection of metric namespaces. +type MetricNamespaceCollection struct { + // REQUIRED; The values for the metric namespaces. + Value []*MetricNamespace `json:"value,omitempty"` +} + +// MetricNamespaceName - The fully qualified metric namespace name. +type MetricNamespaceName struct { + // The metric namespace name. + MetricNamespaceName *string `json:"metricNamespaceName,omitempty"` +} + +// MetricValue - Represents a metric value. +type MetricValue struct { + // REQUIRED; the timestamp for the metric value in ISO 8601 format. + TimeStamp *time.Time `json:"timeStamp,omitempty"` + + // the average value in the time range. + Average *float64 `json:"average,omitempty"` + + // the number of samples in the time range. Can be used to determine the number of values that contributed to the average + // value. + Count *float64 `json:"count,omitempty"` + + // the greatest value in the time range. + Maximum *float64 `json:"maximum,omitempty"` + + // the least value in the time range. + Minimum *float64 `json:"minimum,omitempty"` + + // the sum of all of the values in the time range. + Total *float64 `json:"total,omitempty"` +} + +// MetricsClientListDefinitionsOptions contains the optional parameters for the MetricsClient.NewListDefinitionsPager method. +type MetricsClientListDefinitionsOptions struct { + // Metric namespace to query metric definitions for. + MetricNamespace *string +} + +// MetricsClientListNamespacesOptions contains the optional parameters for the MetricsClient.NewListNamespacesPager method. +type MetricsClientListNamespacesOptions struct { + // The ISO 8601 conform Date start time from which to query for metric namespaces. + StartTime *string +} + +// MetricsClientQueryResourceOptions contains the optional parameters for the MetricsClient.QueryResource method. +type MetricsClientQueryResourceOptions struct { + // The list of aggregation types to retrieve + Aggregation []*AggregationType + // The $filter is used to reduce the set of metric data returned. Example: Metric contains metadata A, B and C. - Return all + // time series of C where A = a1 and B = b1 or b2 $filter=A eq 'a1' and B eq 'b1' + // or B eq 'b2' and C eq '' - Invalid variant: $filter=A eq 'a1' and B eq 'b1' and C eq '' or B = 'b2' This is invalid because + // the logical or operator cannot separate two different metadata names. - + // Return all time series where A = a1, B = b1 and C = c1: $filter=A eq 'a1' and B eq 'b1' and C eq 'c1' - Return all time + // series where A = a1 $filter=A eq 'a1' and B eq '' and C eq ''. Special case: + // When dimension name or dimension value uses round brackets. 
Eg: When dimension name is dim (test) 1 Instead of using $filter= + // "dim (test) 1 eq '' " use $filter= "dim %2528test%2529 1 eq '' " When + // dimension name is dim (test) 3 and dimension value is dim3 (test) val Instead of using $filter= "dim (test) 3 eq 'dim3 + // (test) val' " use $filter= "dim %2528test%2529 3 eq 'dim3 %2528test%2529 val' " + Filter *string + // The interval (i.e. timegrain) of the query. + Interval *string + // The names of the metrics (comma separated) to retrieve. Special case: If a metricname itself has a comma in it then use + // %2 to indicate it. Eg: 'Metric,Name1' should be 'Metric%2Name1' + MetricNames *string + // Metric namespace to query metric definitions for. + MetricNamespace *string + // The aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum + // asc. + OrderBy *string + // Reduces the set of data collected. The syntax allowed depends on the operation. See the operation's description for details. + ResultType *ResultType + // The timespan of the query. It is a string with the following format 'startDateTimeISO/endDateTimeISO'. + Timespan *TimeInterval + // The maximum number of records to retrieve. Valid only if $filter is specified. Defaults to 10. + Top *int32 +} + +// Response - The response to a metrics query. +type Response struct { + // REQUIRED; The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by + // '/'. This may be adjusted in the future and returned back from what was originally + // requested. + Timespan *TimeInterval `json:"timespan,omitempty"` + + // REQUIRED; the value of the collection. + Value []*Metric `json:"value,omitempty"` + + // The integer value representing the relative cost of the query. + Cost *int32 `json:"cost,omitempty"` + + // The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back + // from what was originally requested. This is not present if a metadata request + // was made. + Interval *string `json:"interval,omitempty"` + + // The namespace of the metrics being queried + Namespace *string `json:"namespace,omitempty"` + + // The region of the resource being queried for metrics. + ResourceRegion *string `json:"resourceregion,omitempty"` +} + +// Results - Contains the tables, columns & rows resulting from a query. +type Results struct { + // REQUIRED; The results of the query in tabular format. + Tables []*Table `json:"tables,omitempty"` + + // The code and message for an error. + Error *ErrorInfo `json:"error,omitempty"` + + // Statistics represented in JSON format. + Statistics []byte `json:"statistics,omitempty"` + + // Visualization data in JSON format. + Visualization []byte `json:"render,omitempty"` +} + +// Table - Contains the columns and rows for one table in a query response. +type Table struct { + // REQUIRED; The list of columns in this table. + Columns []*Column `json:"columns,omitempty"` + + // REQUIRED; The name of the table. + Name *string `json:"name,omitempty"` + + // REQUIRED; The resulting rows from this query. + Rows []Row `json:"rows,omitempty"` +} + +// TimeSeriesElement - A time series result type. The discriminator value is always TimeSeries in this case. +type TimeSeriesElement struct { + // An array of data points representing the metric values. This is only returned if a result type of data is specified. 
+ Data []*MetricValue `json:"data,omitempty"` + + // the metadata values returned if $filter was specified in the call. + MetadataValues []*MetadataValue `json:"metadatavalues,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models_serde.go new file mode 100644 index 00000000000..26af83bea4e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/models_serde.go @@ -0,0 +1,835 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azquery + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BatchQueryRequest. +func (b BatchQueryRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "body", b.Body) + populate(objectMap, "id", b.CorrelationID) + populate(objectMap, "headers", b.Headers) + if b.Method == nil { + b.Method = to.Ptr(BatchQueryRequestMethodPOST) + } + populate(objectMap, "method", b.Method) + if b.Path == nil { + b.Path = to.Ptr(BatchQueryRequestPathQuery) + } + populate(objectMap, "path", b.Path) + populate(objectMap, "workspace", b.WorkspaceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchQueryRequest. +func (b *BatchQueryRequest) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "body": + err = unpopulate(val, "Body", &b.Body) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "CorrelationID", &b.CorrelationID) + delete(rawMsg, key) + case "headers": + err = unpopulate(val, "Headers", &b.Headers) + delete(rawMsg, key) + case "method": + err = unpopulate(val, "Method", &b.Method) + delete(rawMsg, key) + case "path": + err = unpopulate(val, "Path", &b.Path) + delete(rawMsg, key) + case "workspace": + err = unpopulate(val, "WorkspaceID", &b.WorkspaceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type BatchQueryResponse. +func (b BatchQueryResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "body", b.Body) + populate(objectMap, "id", b.CorrelationID) + populate(objectMap, "headers", b.Headers) + populate(objectMap, "status", b.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchQueryResponse. 
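+//
+// Like the other generated UnmarshalJSON methods in this file, it first decodes
+// into a map of raw JSON fragments and then populates each known field by key,
+// so unknown fields are ignored rather than causing an error.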
+func (b *BatchQueryResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "body": + err = unpopulate(val, "Body", &b.Body) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "CorrelationID", &b.CorrelationID) + delete(rawMsg, key) + case "headers": + err = unpopulate(val, "Headers", &b.Headers) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &b.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type BatchQueryResults. +func (b BatchQueryResults) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "error", b.Error) + populate(objectMap, "statistics", json.RawMessage(b.Statistics)) + populate(objectMap, "tables", b.Tables) + populate(objectMap, "render", json.RawMessage(b.Visualization)) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchQueryResults. +func (b *BatchQueryResults) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "error": + err = unpopulate(val, "Error", &b.Error) + delete(rawMsg, key) + case "statistics": + b.Statistics = val + delete(rawMsg, key) + case "tables": + err = unpopulate(val, "Tables", &b.Tables) + delete(rawMsg, key) + case "render": + b.Visualization = val + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type BatchRequest. +func (b BatchRequest) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "requests", b.Requests) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchRequest. +func (b *BatchRequest) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "requests": + err = unpopulate(val, "Requests", &b.Requests) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type BatchResponse. +func (b BatchResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "responses", b.Responses) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BatchResponse. 
+func (b *BatchResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "responses": + err = unpopulate(val, "Responses", &b.Responses) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Body. +func (b Body) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "workspaces", b.AdditionalWorkspaces) + populate(objectMap, "query", b.Query) + populate(objectMap, "timespan", b.Timespan) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Body. +func (b *Body) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "workspaces": + err = unpopulate(val, "AdditionalWorkspaces", &b.AdditionalWorkspaces) + delete(rawMsg, key) + case "query": + err = unpopulate(val, "Query", &b.Query) + delete(rawMsg, key) + case "timespan": + err = unpopulate(val, "Timespan", &b.Timespan) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Column. +func (c Column) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "name", c.Name) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Column. +func (c *Column) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalizableString. +func (l LocalizableString) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "localizedValue", l.LocalizedValue) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalizableString. +func (l *LocalizableString) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "localizedValue": + err = unpopulate(val, "LocalizedValue", &l.LocalizedValue) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetadataValue. 
+func (m MetadataValue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "name", m.Name) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetadataValue. +func (m *MetadataValue) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Metric. +func (m Metric) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "displayDescription", m.DisplayDescription) + populate(objectMap, "errorCode", m.ErrorCode) + populate(objectMap, "errorMessage", m.ErrorMessage) + populate(objectMap, "id", m.ID) + populate(objectMap, "name", m.Name) + populate(objectMap, "timeseries", m.TimeSeries) + populate(objectMap, "type", m.Type) + populate(objectMap, "unit", m.Unit) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Metric. +func (m *Metric) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "displayDescription": + err = unpopulate(val, "DisplayDescription", &m.DisplayDescription) + delete(rawMsg, key) + case "errorCode": + err = unpopulate(val, "ErrorCode", &m.ErrorCode) + delete(rawMsg, key) + case "errorMessage": + err = unpopulate(val, "ErrorMessage", &m.ErrorMessage) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "timeseries": + err = unpopulate(val, "TimeSeries", &m.TimeSeries) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &m.Type) + delete(rawMsg, key) + case "unit": + err = unpopulate(val, "Unit", &m.Unit) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricAvailability. +func (m MetricAvailability) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "retention", m.Retention) + populate(objectMap, "timeGrain", m.TimeGrain) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricAvailability. 
+func (m *MetricAvailability) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "retention": + err = unpopulate(val, "Retention", &m.Retention) + delete(rawMsg, key) + case "timeGrain": + err = unpopulate(val, "TimeGrain", &m.TimeGrain) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricDefinition. +func (m MetricDefinition) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "category", m.Category) + populate(objectMap, "dimensions", m.Dimensions) + populate(objectMap, "displayDescription", m.DisplayDescription) + populate(objectMap, "id", m.ID) + populate(objectMap, "isDimensionRequired", m.IsDimensionRequired) + populate(objectMap, "metricAvailabilities", m.MetricAvailabilities) + populate(objectMap, "metricClass", m.MetricClass) + populate(objectMap, "name", m.Name) + populate(objectMap, "namespace", m.Namespace) + populate(objectMap, "primaryAggregationType", m.PrimaryAggregationType) + populate(objectMap, "resourceId", m.ResourceID) + populate(objectMap, "supportedAggregationTypes", m.SupportedAggregationTypes) + populate(objectMap, "unit", m.Unit) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricDefinition. +func (m *MetricDefinition) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "category": + err = unpopulate(val, "Category", &m.Category) + delete(rawMsg, key) + case "dimensions": + err = unpopulate(val, "Dimensions", &m.Dimensions) + delete(rawMsg, key) + case "displayDescription": + err = unpopulate(val, "DisplayDescription", &m.DisplayDescription) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "isDimensionRequired": + err = unpopulate(val, "IsDimensionRequired", &m.IsDimensionRequired) + delete(rawMsg, key) + case "metricAvailabilities": + err = unpopulate(val, "MetricAvailabilities", &m.MetricAvailabilities) + delete(rawMsg, key) + case "metricClass": + err = unpopulate(val, "MetricClass", &m.MetricClass) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "namespace": + err = unpopulate(val, "Namespace", &m.Namespace) + delete(rawMsg, key) + case "primaryAggregationType": + err = unpopulate(val, "PrimaryAggregationType", &m.PrimaryAggregationType) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &m.ResourceID) + delete(rawMsg, key) + case "supportedAggregationTypes": + err = unpopulate(val, "SupportedAggregationTypes", &m.SupportedAggregationTypes) + delete(rawMsg, key) + case "unit": + err = unpopulate(val, "Unit", &m.Unit) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricDefinitionCollection. 
+func (m MetricDefinitionCollection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricDefinitionCollection. +func (m *MetricDefinitionCollection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricNamespace. +func (m MetricNamespace) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "classification", m.Classification) + populate(objectMap, "id", m.ID) + populate(objectMap, "name", m.Name) + populate(objectMap, "properties", m.Properties) + populate(objectMap, "type", m.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricNamespace. +func (m *MetricNamespace) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "classification": + err = unpopulate(val, "Classification", &m.Classification) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &m.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &m.Properties) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &m.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricNamespaceCollection. +func (m MetricNamespaceCollection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", m.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricNamespaceCollection. +func (m *MetricNamespaceCollection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &m.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricNamespaceName. +func (m MetricNamespaceName) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "metricNamespaceName", m.MetricNamespaceName) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricNamespaceName. 
+func (m *MetricNamespaceName) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "metricNamespaceName": + err = unpopulate(val, "MetricNamespaceName", &m.MetricNamespaceName) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MetricValue. +func (m MetricValue) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "average", m.Average) + populate(objectMap, "count", m.Count) + populate(objectMap, "maximum", m.Maximum) + populate(objectMap, "minimum", m.Minimum) + populateTimeRFC3339(objectMap, "timeStamp", m.TimeStamp) + populate(objectMap, "total", m.Total) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MetricValue. +func (m *MetricValue) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "average": + err = unpopulate(val, "Average", &m.Average) + delete(rawMsg, key) + case "count": + err = unpopulate(val, "Count", &m.Count) + delete(rawMsg, key) + case "maximum": + err = unpopulate(val, "Maximum", &m.Maximum) + delete(rawMsg, key) + case "minimum": + err = unpopulate(val, "Minimum", &m.Minimum) + delete(rawMsg, key) + case "timeStamp": + err = unpopulateTimeRFC3339(val, "TimeStamp", &m.TimeStamp) + delete(rawMsg, key) + case "total": + err = unpopulate(val, "Total", &m.Total) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Response. +func (r Response) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "cost", r.Cost) + populate(objectMap, "interval", r.Interval) + populate(objectMap, "namespace", r.Namespace) + populate(objectMap, "resourceregion", r.ResourceRegion) + populate(objectMap, "timespan", r.Timespan) + populate(objectMap, "value", r.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Response. +func (r *Response) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "cost": + err = unpopulate(val, "Cost", &r.Cost) + delete(rawMsg, key) + case "interval": + err = unpopulate(val, "Interval", &r.Interval) + delete(rawMsg, key) + case "namespace": + err = unpopulate(val, "Namespace", &r.Namespace) + delete(rawMsg, key) + case "resourceregion": + err = unpopulate(val, "ResourceRegion", &r.ResourceRegion) + delete(rawMsg, key) + case "timespan": + err = unpopulate(val, "Timespan", &r.Timespan) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &r.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Results. 
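+// Statistics and Visualization already hold raw JSON, so they are wrapped in json.RawMessage and copied through verbatim rather than re-marshalled.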
+func (r Results) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "error", r.Error) + populate(objectMap, "statistics", json.RawMessage(r.Statistics)) + populate(objectMap, "tables", r.Tables) + populate(objectMap, "render", json.RawMessage(r.Visualization)) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Results. +func (r *Results) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "error": + err = unpopulate(val, "Error", &r.Error) + delete(rawMsg, key) + case "statistics": + r.Statistics = val + delete(rawMsg, key) + case "tables": + err = unpopulate(val, "Tables", &r.Tables) + delete(rawMsg, key) + case "render": + r.Visualization = val + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Table. +func (t Table) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "columns", t.Columns) + populate(objectMap, "name", t.Name) + populate(objectMap, "rows", t.Rows) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Table. +func (t *Table) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "columns": + err = unpopulate(val, "Columns", &t.Columns) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &t.Name) + delete(rawMsg, key) + case "rows": + err = unpopulate(val, "Rows", &t.Rows) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type TimeSeriesElement. +func (t TimeSeriesElement) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "data", t.Data) + populate(objectMap, "metadatavalues", t.MetadataValues) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TimeSeriesElement. 
+func (t *TimeSeriesElement) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "data": + err = unpopulate(val, "Data", &t.Data) + delete(rawMsg, key) + case "metadatavalues": + err = unpopulate(val, "MetadataValues", &t.MetadataValues) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/response_types.go new file mode 100644 index 00000000000..790850e3630 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/response_types.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azquery + +// LogsClientQueryBatchResponse contains the response from method LogsClient.QueryBatch. +type LogsClientQueryBatchResponse struct { + BatchResponse +} + +// LogsClientQueryResourceResponse contains the response from method LogsClient.QueryResource. +type LogsClientQueryResourceResponse struct { + Results +} + +// LogsClientQueryWorkspaceResponse contains the response from method LogsClient.QueryWorkspace. +type LogsClientQueryWorkspaceResponse struct { + Results +} + +// MetricsClientListDefinitionsResponse contains the response from method MetricsClient.NewListDefinitionsPager. +type MetricsClientListDefinitionsResponse struct { + MetricDefinitionCollection +} + +// MetricsClientListNamespacesResponse contains the response from method MetricsClient.NewListNamespacesPager. +type MetricsClientListNamespacesResponse struct { + MetricNamespaceCollection +} + +// MetricsClientQueryResourceResponse contains the response from method MetricsClient.QueryResource. 
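+// The embedded Response (see models.go) carries the returned metric values, timespan, interval and related metadata.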
+type MetricsClientQueryResourceResponse struct { + Response +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/test-resources.bicep new file mode 100644 index 00000000000..240b023e074 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/test-resources.bicep @@ -0,0 +1,50 @@ +param baseName string +param sku string = 'pergb2018' +param appSku string = 'standard' +param retentionInDays int = 30 +param resourcePermissions bool = false +param location string = resourceGroup().location + +resource log_analytics1 'Microsoft.OperationalInsights/workspaces@2020-08-01' = { + name: '${baseName}1' + location: location + properties: { + sku: { + name: sku + } + retentionInDays: retentionInDays + features: { + searchVersion: 1 + legacy: 0 + enableLogAccessUsingOnlyResourcePermissions: resourcePermissions + } + } +} + +resource log_analytics2 'Microsoft.OperationalInsights/workspaces@2020-08-01' = { + name: '${baseName}2' + location: location + properties: { + sku: { + name: sku + } + retentionInDays: retentionInDays + features: { + searchVersion: 1 + legacy: 0 + enableLogAccessUsingOnlyResourcePermissions: resourcePermissions + } + } +} + +resource app_config 'Microsoft.AppConfiguration/configurationStores@2022-05-01' = { + name: baseName + location: location + sku: { + name: appSku + } +} + +output WORKSPACE_ID string = log_analytics1.properties.customerId +output WORKSPACE_ID2 string = log_analytics2.properties.customerId +output RESOURCE_URI string = app_config.id diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/time_rfc3339.go new file mode 100644 index 00000000000..481f0370775 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/time_rfc3339.go @@ -0,0 +1,87 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package azquery + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
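+// A timestamp with no zone suffix (e.g. "2023-01-02T15:04:05.999999999") therefore falls back to the suffix-less utcLayout variants; anything matching tzOffsetRegex below is parsed as RFC 3339.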
+var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} + +func populateTimeRFC3339(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeRFC3339)(t) +} + +func unpopulateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/version.go new file mode 100644 index 00000000000..59ca11d98ff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azquery + +const ( + moduleName = "azquery" + version = "v1.1.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/CHANGELOG.md new file mode 100644 index 00000000000..0b9d12f5f59 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/CHANGELOG.md @@ -0,0 +1,75 @@ +# Release History + +## 1.2.0 (2023-11-24) +### Features Added + +- Support for test fakes and OpenTelemetry trace spans. 
+ + +## 1.2.0-beta.1 (2023-04-28) +### Features Added + +- New value `PublicNetworkAccessFlagSecuredByPerimeter` added to enum type `PublicNetworkAccessFlag` +- New enum type `ApplicationGroupPolicyType` with values `ApplicationGroupPolicyTypeThrottlingPolicy` +- New enum type `CleanupPolicyRetentionDescription` with values `CleanupPolicyRetentionDescriptionCompaction`, `CleanupPolicyRetentionDescriptionDelete` +- New enum type `MetricID` with values `MetricIDIncomingBytes`, `MetricIDIncomingMessages`, `MetricIDOutgoingBytes`, `MetricIDOutgoingMessages` +- New enum type `NetworkSecurityPerimeterConfigurationProvisioningState` with values `NetworkSecurityPerimeterConfigurationProvisioningStateAccepted`, `NetworkSecurityPerimeterConfigurationProvisioningStateCanceled`, `NetworkSecurityPerimeterConfigurationProvisioningStateCreating`, `NetworkSecurityPerimeterConfigurationProvisioningStateDeleted`, `NetworkSecurityPerimeterConfigurationProvisioningStateDeleting`, `NetworkSecurityPerimeterConfigurationProvisioningStateFailed`, `NetworkSecurityPerimeterConfigurationProvisioningStateInvalidResponse`, `NetworkSecurityPerimeterConfigurationProvisioningStateSucceeded`, `NetworkSecurityPerimeterConfigurationProvisioningStateSucceededWithIssues`, `NetworkSecurityPerimeterConfigurationProvisioningStateUnknown`, `NetworkSecurityPerimeterConfigurationProvisioningStateUpdating` +- New enum type `NspAccessRuleDirection` with values `NspAccessRuleDirectionInbound`, `NspAccessRuleDirectionOutbound` +- New enum type `PublicNetworkAccess` with values `PublicNetworkAccessDisabled`, `PublicNetworkAccessEnabled`, `PublicNetworkAccessSecuredByPerimeter` +- New enum type `ResourceAssociationAccessMode` with values `ResourceAssociationAccessModeAuditMode`, `ResourceAssociationAccessModeEnforcedMode`, `ResourceAssociationAccessModeLearningMode`, `ResourceAssociationAccessModeNoAssociationMode`, `ResourceAssociationAccessModeUnspecifiedMode` +- New enum type `TLSVersion` with values `TLSVersionOne0`, `TLSVersionOne1`, `TLSVersionOne2` +- New function `NewApplicationGroupClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ApplicationGroupClient, error)` +- New function `*ApplicationGroupClient.CreateOrUpdateApplicationGroup(context.Context, string, string, string, ApplicationGroup, *ApplicationGroupClientCreateOrUpdateApplicationGroupOptions) (ApplicationGroupClientCreateOrUpdateApplicationGroupResponse, error)` +- New function `*ApplicationGroupClient.Delete(context.Context, string, string, string, *ApplicationGroupClientDeleteOptions) (ApplicationGroupClientDeleteResponse, error)` +- New function `*ApplicationGroupClient.Get(context.Context, string, string, string, *ApplicationGroupClientGetOptions) (ApplicationGroupClientGetResponse, error)` +- New function `*ApplicationGroupClient.NewListByNamespacePager(string, string, *ApplicationGroupClientListByNamespaceOptions) *runtime.Pager[ApplicationGroupClientListByNamespaceResponse]` +- New function `*ApplicationGroupPolicy.GetApplicationGroupPolicy() *ApplicationGroupPolicy` +- New function `*ClientFactory.NewApplicationGroupClient() *ApplicationGroupClient` +- New function `*ClientFactory.NewNetworkSecurityPerimeterConfigurationClient() *NetworkSecurityPerimeterConfigurationClient` +- New function `*ClientFactory.NewNetworkSecurityPerimeterConfigurationsClient() *NetworkSecurityPerimeterConfigurationsClient` +- New function `*ThrottlingPolicy.GetApplicationGroupPolicy() *ApplicationGroupPolicy` +- New function 
`NewNetworkSecurityPerimeterConfigurationClient(string, azcore.TokenCredential, *arm.ClientOptions) (*NetworkSecurityPerimeterConfigurationClient, error)` +- New function `*NetworkSecurityPerimeterConfigurationClient.List(context.Context, string, string, *NetworkSecurityPerimeterConfigurationClientListOptions) (NetworkSecurityPerimeterConfigurationClientListResponse, error)` +- New function `NewNetworkSecurityPerimeterConfigurationsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*NetworkSecurityPerimeterConfigurationsClient, error)` +- New function `*NetworkSecurityPerimeterConfigurationsClient.BeginCreateOrUpdate(context.Context, string, string, string, *NetworkSecurityPerimeterConfigurationsClientBeginCreateOrUpdateOptions) (*runtime.Poller[NetworkSecurityPerimeterConfigurationsClientCreateOrUpdateResponse], error)` +- New struct `ApplicationGroup` +- New struct `ApplicationGroupListResult` +- New struct `ApplicationGroupProperties` +- New struct `NetworkSecurityPerimeter` +- New struct `NetworkSecurityPerimeterConfiguration` +- New struct `NetworkSecurityPerimeterConfigurationList` +- New struct `NetworkSecurityPerimeterConfigurationProperties` +- New struct `NetworkSecurityPerimeterConfigurationPropertiesProfile` +- New struct `NetworkSecurityPerimeterConfigurationPropertiesResourceAssociation` +- New struct `NspAccessRule` +- New struct `NspAccessRuleProperties` +- New struct `NspAccessRulePropertiesSubscriptionsItem` +- New struct `ProvisioningIssue` +- New struct `ProvisioningIssueProperties` +- New struct `RetentionDescription` +- New struct `ThrottlingPolicy` +- New field `SupportsScaling` in struct `ClusterProperties` +- New field `MinimumTLSVersion` in struct `EHNamespaceProperties` +- New field `PublicNetworkAccess` in struct `EHNamespaceProperties` +- New field `RetentionDescription` in struct `Properties` + + +## 1.1.1 (2023-04-14) +### Bug Fixes + +- Fix serialization bug of empty value of `any` type. + + +## 1.1.0 (2023-04-06) +### Features Added + +- New struct `ClientFactory` which is a client factory used to create any client in this module + + +## 1.0.0 (2022-05-16) + +The package of `github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub` is using our [next generation design principles](https://azure.github.io/azure-sdk/general_introduction.html) since version 1.0.0, which contains breaking changes. + +To migrate the existing applications to the latest version, please refer to [Migration Guide](https://aka.ms/azsdk/go/mgmt/migration). + +To learn more, please refer to our documentation [Quick Start](https://aka.ms/azsdk/go/mgmt). \ No newline at end of file diff --git a/vendor/github.com/devigned/tab/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/LICENSE.txt similarity index 93% rename from vendor/github.com/devigned/tab/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/LICENSE.txt index a936fe63525..dc0c2ffb3dc 100644 --- a/vendor/github.com/devigned/tab/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/LICENSE.txt @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2019 David Justice +Copyright (c) Microsoft Corporation. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/README.md
new file mode 100644
index 00000000000..1ec4cf18009
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/README.md
@@ -0,0 +1,99 @@
+# Azure Event Hubs Module for Go
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub)
+
+The `armeventhub` module provides operations for working with Azure Event Hubs.
+
+[Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/resourcemanager/eventhub/armeventhub)
+
+# Getting started
+
+## Prerequisites
+
+- an [Azure subscription](https://azure.microsoft.com/free/)
+- Go 1.18 or above (You could download and install the latest version of Go from [here](https://go.dev/doc/install). It will replace the existing Go on your machine. If you want to install multiple Go versions on the same machine, you could refer to this [doc](https://go.dev/doc/manage-install).)
+
+## Install the package
+
+This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
+
+Install the Azure Event Hubs module:
+
+```sh
+go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub
+```
+
+## Authorization
+
+When creating a client, you will need to provide a credential for authenticating with Azure Event Hubs. The `azidentity` module provides facilities for various ways of authenticating with Azure including client/secret, certificate, managed identity, and more.
+
+```go
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+```
+
+For more information on authentication, please see the documentation for `azidentity` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
+
+## Client Factory
+
+The Azure Event Hubs module consists of one or more clients. We provide a client factory that can be used to create any client in this module.
+
+```go
+clientFactory, err := armeventhub.NewClientFactory(<subscription ID>, cred, nil)
+```
+
+You can use `ClientOptions` in package `github.com/Azure/azure-sdk-for-go/sdk/azcore/arm` to set the endpoint to connect with public and sovereign clouds as well as Azure Stack. For more information, please see the documentation for `azcore` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
+
+```go
+options := arm.ClientOptions {
+    ClientOptions: azcore.ClientOptions {
+        Cloud: cloud.AzureChina,
+    },
+}
+clientFactory, err := armeventhub.NewClientFactory(<subscription ID>, cred, &options)
+```
+
+## Clients
+
+A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using the client factory.
+
+```go
+client := clientFactory.NewNamespacesClient()
+```
+
+## Fakes
+
+The fake package contains types used for constructing in-memory fake servers used in unit tests.
+This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. + +## More sample code + +- [Clusters](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/eventhub/clusters) +- [Disaster Recovery Config](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/eventhub/disaster_recovery_config) +- [Event Hub](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/eventhub/eventhub) +- [Namespace](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/eventhub/namespace) + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Event Hubs` label. + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
\ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/assets.json new file mode 100644 index 00000000000..8491dfed837 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/resourcemanager/eventhub/armeventhub", + "Tag": "go/resourcemanager/eventhub/armeventhub_fe9dfbbf4c" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/autorest.md new file mode 100644 index 00000000000..7310ab5d20b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/autorest.md @@ -0,0 +1,13 @@ +### AutoRest Configuration + +> see https://aka.ms/autorest + +``` yaml +azure-arm: true +require: +- https://github.com/Azure/azure-rest-api-specs/blob/0cc5e2efd6ffccf30e80d1e150b488dd87198b94/specification/eventhub/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/0cc5e2efd6ffccf30e80d1e150b488dd87198b94/specification/eventhub/resource-manager/readme.go.md +license-header: MICROSOFT_MIT_NO_VERSION +module-version: 1.2.0 +tag: package-2021-11 +``` \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/build.go new file mode 100644 index 00000000000..f332fa759bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/build.go @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// This file enables 'go generate' to regenerate this specific SDK +//go:generate pwsh ../../../../eng/scripts/build.ps1 -skipBuild -cleanGenerated -format -tidy -generate resourcemanager/eventhub/armeventhub + +package armeventhub diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/ci.yml new file mode 100644 index 00000000000..1843c3a2270 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/ci.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. 
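+# The trigger and pr sections below scope CI runs to changes under sdk/resourcemanager/eventhub/armeventhub/.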
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/eventhub/armeventhub/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/eventhub/armeventhub/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + IncludeRelease: true + ServiceDirectory: 'resourcemanager/eventhub/armeventhub' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/client_factory.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/client_factory.go new file mode 100644 index 00000000000..03c3b0ff36a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/client_factory.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" +) + +// ClientFactory is a client factory used to create any client in this module. +// Don't use this type directly, use NewClientFactory instead. +type ClientFactory struct { + subscriptionID string + credential azcore.TokenCredential + options *arm.ClientOptions +} + +// NewClientFactory creates a new instance of ClientFactory with the specified values. +// The parameter values will be propagated to any client created from this factory. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { + _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + return &ClientFactory{ + subscriptionID: subscriptionID, credential: credential, + options: options.Clone(), + }, nil +} + +// NewClustersClient creates a new instance of ClustersClient. +func (c *ClientFactory) NewClustersClient() *ClustersClient { + subClient, _ := NewClustersClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewConfigurationClient creates a new instance of ConfigurationClient. +func (c *ClientFactory) NewConfigurationClient() *ConfigurationClient { + subClient, _ := NewConfigurationClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewConsumerGroupsClient creates a new instance of ConsumerGroupsClient. +func (c *ClientFactory) NewConsumerGroupsClient() *ConsumerGroupsClient { + subClient, _ := NewConsumerGroupsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewDisasterRecoveryConfigsClient creates a new instance of DisasterRecoveryConfigsClient. 
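+// As in the other accessors on this factory, the constructor error is discarded: NewClientFactory already surfaced any construction failure from arm.NewClient.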
+func (c *ClientFactory) NewDisasterRecoveryConfigsClient() *DisasterRecoveryConfigsClient { + subClient, _ := NewDisasterRecoveryConfigsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewEventHubsClient creates a new instance of EventHubsClient. +func (c *ClientFactory) NewEventHubsClient() *EventHubsClient { + subClient, _ := NewEventHubsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewNamespacesClient creates a new instance of NamespacesClient. +func (c *ClientFactory) NewNamespacesClient() *NamespacesClient { + subClient, _ := NewNamespacesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewOperationsClient creates a new instance of OperationsClient. +func (c *ClientFactory) NewOperationsClient() *OperationsClient { + subClient, _ := NewOperationsClient(c.credential, c.options) + return subClient +} + +// NewPrivateEndpointConnectionsClient creates a new instance of PrivateEndpointConnectionsClient. +func (c *ClientFactory) NewPrivateEndpointConnectionsClient() *PrivateEndpointConnectionsClient { + subClient, _ := NewPrivateEndpointConnectionsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewPrivateLinkResourcesClient creates a new instance of PrivateLinkResourcesClient. +func (c *ClientFactory) NewPrivateLinkResourcesClient() *PrivateLinkResourcesClient { + subClient, _ := NewPrivateLinkResourcesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +// NewSchemaRegistryClient creates a new instance of SchemaRegistryClient. +func (c *ClientFactory) NewSchemaRegistryClient() *SchemaRegistryClient { + subClient, _ := NewSchemaRegistryClient(c.subscriptionID, c.credential, c.options) + return subClient +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/clusters_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/clusters_client.go new file mode 100644 index 00000000000..7c9b944b4c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/clusters_client.go @@ -0,0 +1,577 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ClustersClient contains the methods for the Clusters group. +// Don't use this type directly, use NewClustersClient() instead. +type ClustersClient struct { + internal *arm.Client + subscriptionID string +} + +// NewClustersClient creates a new instance of ClustersClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
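+// For example (hypothetical values): client, err := NewClustersClient("<subscription ID>", cred, nil).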
+func NewClustersClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClustersClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ClustersClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates an instance of an Event Hubs Cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - parameters - Parameters for creating a eventhub cluster resource. +// - options - ClustersClientBeginCreateOrUpdateOptions contains the optional parameters for the ClustersClient.BeginCreateOrUpdate +// method. +func (client *ClustersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginCreateOrUpdateOptions) (*runtime.Poller[ClustersClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, clusterName, parameters, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ClustersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ClustersClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// CreateOrUpdate - Creates or updates an instance of an Event Hubs Cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +func (client *ClustersClient) createOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginCreateOrUpdateOptions) (*http.Response, error) { + var err error + const operationName = "ClustersClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, clusterName, parameters, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
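+// It path-escapes the subscription, resource group and cluster segments into the URL, pins api-version=2021-11-01 on the query string, and marshals parameters as the JSON body.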
+func (client *ClustersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// BeginDelete - Deletes an existing Event Hubs Cluster. This operation is idempotent. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - options - ClustersClientBeginDeleteOptions contains the optional parameters for the ClustersClient.BeginDelete method. +func (client *ClustersClient) BeginDelete(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*runtime.Poller[ClustersClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, clusterName, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ClustersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ClustersClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Delete - Deletes an existing Event Hubs Cluster. This operation is idempotent. +// If the operation fails it returns an *azcore.ResponseError type. 
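+// Success is any of 200, 202 or 204, which is what lets a repeated delete of the same cluster complete without error.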
+// +// Generated from API version 2021-11-01 +func (client *ClustersClient) deleteOperation(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*http.Response, error) { + var err error + const operationName = "ClustersClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, clusterName, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets the resource description of the specified Event Hubs Cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - options - ClustersClientGetOptions contains the optional parameters for the ClustersClient.Get method. 
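// NOTE (editor): an illustrative sketch, not part of the vendored file. Get is a
// plain synchronous call: on a 200 the embedded Cluster is populated by
// getHandleResponse below, and any other status surfaces as *azcore.ResponseError.
// The Properties/Status field names are assumed from this package's models.
//
//	resp, err := client.Get(ctx, "example-rg", "example-cluster", nil)
//	if err != nil {
//		var respErr *azcore.ResponseError
//		if errors.As(err, &respErr) {
//			log.Fatalf("service returned %d: %v", respErr.StatusCode, respErr)
//		}
//		log.Fatal(err)
//	}
//	if resp.Properties != nil && resp.Properties.Status != nil {
//		log.Printf("cluster status: %s", *resp.Properties.Status)
//	}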
+func (client *ClustersClient) Get(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (ClustersClientGetResponse, error) { + var err error + const operationName = "ClustersClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, clusterName, options) + if err != nil { + return ClustersClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ClustersClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ClustersClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *ClustersClient) getHandleResponse(resp *http.Response) (ClustersClientGetResponse, error) { + result := ClustersClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Cluster); err != nil { + return ClustersClientGetResponse{}, err + } + return result, nil +} + +// ListAvailableClusterRegion - List the quantity of available pre-provisioned Event Hubs Clusters, indexed by Azure region. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - options - ClustersClientListAvailableClusterRegionOptions contains the optional parameters for the ClustersClient.ListAvailableClusterRegion +// method. 
+func (client *ClustersClient) ListAvailableClusterRegion(ctx context.Context, options *ClustersClientListAvailableClusterRegionOptions) (ClustersClientListAvailableClusterRegionResponse, error) { + var err error + const operationName = "ClustersClient.ListAvailableClusterRegion" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listAvailableClusterRegionCreateRequest(ctx, options) + if err != nil { + return ClustersClientListAvailableClusterRegionResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ClustersClientListAvailableClusterRegionResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ClustersClientListAvailableClusterRegionResponse{}, err + } + resp, err := client.listAvailableClusterRegionHandleResponse(httpResp) + return resp, err +} + +// listAvailableClusterRegionCreateRequest creates the ListAvailableClusterRegion request. +func (client *ClustersClient) listAvailableClusterRegionCreateRequest(ctx context.Context, options *ClustersClientListAvailableClusterRegionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/availableClusterRegions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAvailableClusterRegionHandleResponse handles the ListAvailableClusterRegion response. +func (client *ClustersClient) listAvailableClusterRegionHandleResponse(resp *http.Response) (ClustersClientListAvailableClusterRegionResponse, error) { + result := ClustersClientListAvailableClusterRegionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailableClustersList); err != nil { + return ClustersClientListAvailableClusterRegionResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists the available Event Hubs Clusters within an ARM resource group +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - options - ClustersClientListByResourceGroupOptions contains the optional parameters for the ClustersClient.NewListByResourceGroupPager +// method. 
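// NOTE (editor): an illustrative sketch, not part of the vendored file. The pager
// constructed below is driven with More/NextPage; paging stops when the service
// omits nextLink, mirroring the More func in the implementation. The resource
// group name is hypothetical.
//
//	pager := client.NewListByResourceGroupPager("example-rg", nil)
//	for pager.More() {
//		page, err := pager.NextPage(ctx)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, cluster := range page.Value { // Value comes from the embedded ClusterListResult
//			log.Printf("cluster: %s", *cluster.Name)
//		}
//	}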
+func (client *ClustersClient) NewListByResourceGroupPager(resourceGroupName string, options *ClustersClientListByResourceGroupOptions) *runtime.Pager[ClustersClientListByResourceGroupResponse] {
+	return runtime.NewPager(runtime.PagingHandler[ClustersClientListByResourceGroupResponse]{
+		More: func(page ClustersClientListByResourceGroupResponse) bool {
+			return page.NextLink != nil && len(*page.NextLink) > 0
+		},
+		Fetcher: func(ctx context.Context, page *ClustersClientListByResourceGroupResponse) (ClustersClientListByResourceGroupResponse, error) {
+			ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ClustersClient.NewListByResourceGroupPager")
+			nextLink := ""
+			if page != nil {
+				nextLink = *page.NextLink
+			}
+			resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+				return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)
+			}, nil)
+			if err != nil {
+				return ClustersClientListByResourceGroupResponse{}, err
+			}
+			return client.listByResourceGroupHandleResponse(resp)
+		},
+		Tracer: client.internal.Tracer(),
+	})
+}
+
+// listByResourceGroupCreateRequest creates the ListByResourceGroup request.
+func (client *ClustersClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ClustersClientListByResourceGroupOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2021-11-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listByResourceGroupHandleResponse handles the ListByResourceGroup response.
+func (client *ClustersClient) listByResourceGroupHandleResponse(resp *http.Response) (ClustersClientListByResourceGroupResponse, error) {
+	result := ClustersClientListByResourceGroupResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil {
+		return ClustersClientListByResourceGroupResponse{}, err
+	}
+	return result, nil
+}
+
+// NewListBySubscriptionPager - Lists the available Event Hubs Clusters within an Azure subscription
+//
+// Generated from API version 2021-11-01
+//   - options - ClustersClientListBySubscriptionOptions contains the optional parameters for the ClustersClient.NewListBySubscriptionPager
+//     method.
+func (client *ClustersClient) NewListBySubscriptionPager(options *ClustersClientListBySubscriptionOptions) *runtime.Pager[ClustersClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[ClustersClientListBySubscriptionResponse]{ + More: func(page ClustersClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ClustersClientListBySubscriptionResponse) (ClustersClientListBySubscriptionResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ClustersClient.NewListBySubscriptionPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listBySubscriptionCreateRequest(ctx, options) + }, nil) + if err != nil { + return ClustersClientListBySubscriptionResponse{}, err + } + return client.listBySubscriptionHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. +func (client *ClustersClient) listBySubscriptionCreateRequest(ctx context.Context, options *ClustersClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/clusters" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *ClustersClient) listBySubscriptionHandleResponse(resp *http.Response) (ClustersClientListBySubscriptionResponse, error) { + result := ClustersClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ClusterListResult); err != nil { + return ClustersClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// ListNamespaces - List all Event Hubs Namespace IDs in an Event Hubs Dedicated Cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - options - ClustersClientListNamespacesOptions contains the optional parameters for the ClustersClient.ListNamespaces method. 
+func (client *ClustersClient) ListNamespaces(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientListNamespacesOptions) (ClustersClientListNamespacesResponse, error) { + var err error + const operationName = "ClustersClient.ListNamespaces" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listNamespacesCreateRequest(ctx, resourceGroupName, clusterName, options) + if err != nil { + return ClustersClientListNamespacesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ClustersClientListNamespacesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ClustersClientListNamespacesResponse{}, err + } + resp, err := client.listNamespacesHandleResponse(httpResp) + return resp, err +} + +// listNamespacesCreateRequest creates the ListNamespaces request. +func (client *ClustersClient) listNamespacesCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ClustersClientListNamespacesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/namespaces" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listNamespacesHandleResponse handles the ListNamespaces response. +func (client *ClustersClient) listNamespacesHandleResponse(resp *http.Response) (ClustersClientListNamespacesResponse, error) { + result := ClustersClientListNamespacesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EHNamespaceIDListResult); err != nil { + return ClustersClientListNamespacesResponse{}, err + } + return result, nil +} + +// BeginUpdate - Modifies mutable properties on the Event Hubs Cluster. This operation is idempotent. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - parameters - The properties of the Event Hubs Cluster which should be updated. +// - options - ClustersClientBeginUpdateOptions contains the optional parameters for the ClustersClient.BeginUpdate method. 
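// NOTE (editor): an illustrative sketch, not part of the vendored file. It shows the
// ResumeToken branch of the poller pattern used by BeginUpdate below: a token taken
// from a live poller can be persisted and later passed back through the options
// struct to rehydrate the same operation, e.g. across a process restart. Names are
// hypothetical.
//
//	poller, err := client.BeginUpdate(ctx, "example-rg", "example-cluster", Cluster{
//		Tags: map[string]*string{"env": to.Ptr("dev")},
//	}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	token, err := poller.ResumeToken() // fails once the poller is in a terminal state
//	if err != nil {
//		log.Fatal(err)
//	}
//	// ... later, possibly in a new process; the other arguments are ignored when
//	// ResumeToken is set, as the implementation below shows.
//	resumed, err := client.BeginUpdate(ctx, "", "", Cluster{},
//		&ClustersClientBeginUpdateOptions{ResumeToken: token})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err := resumed.PollUntilDone(ctx, nil); err != nil {
//		log.Fatal(err)
//	}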
+func (client *ClustersClient) BeginUpdate(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginUpdateOptions) (*runtime.Poller[ClustersClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, clusterName, parameters, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ClustersClientUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ClustersClientUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Update - Modifies mutable properties on the Event Hubs Cluster. This operation is idempotent. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +func (client *ClustersClient) update(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginUpdateOptions) (*http.Response, error) { + var err error + const operationName = "ClustersClient.BeginUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateCreateRequest(ctx, resourceGroupName, clusterName, parameters, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *ClustersClient) updateCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters Cluster, options *ClustersClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/configuration_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/configuration_client.go new file mode 100644 index 00000000000..fa0e64b59c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/configuration_client.go @@ -0,0 +1,179 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ConfigurationClient contains the methods for the Configuration group. +// Don't use this type directly, use NewConfigurationClient() instead. +type ConfigurationClient struct { + internal *arm.Client + subscriptionID string +} + +// NewConfigurationClient creates a new instance of ConfigurationClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
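// NOTE (editor): an illustrative sketch, not part of the vendored file. It shows the
// usual way to construct one of these clients, with a credential from the azidentity
// module as the doc comment above suggests. The subscription ID is a placeholder.
//
//	import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
//
//	cred, err := azidentity.NewDefaultAzureCredential(nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	client, err := NewConfigurationClient("00000000-0000-0000-0000-000000000000", cred, nil)
//	if err != nil {
//		log.Fatal(err)
//	}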
+func NewConfigurationClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ConfigurationClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ConfigurationClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// Get - Get all Event Hubs Cluster settings - a collection of key/value pairs which represent the quotas and settings imposed +// on the cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - options - ConfigurationClientGetOptions contains the optional parameters for the ConfigurationClient.Get method. +func (client *ConfigurationClient) Get(ctx context.Context, resourceGroupName string, clusterName string, options *ConfigurationClientGetOptions) (ConfigurationClientGetResponse, error) { + var err error + const operationName = "ConfigurationClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, clusterName, options) + if err != nil { + return ConfigurationClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ConfigurationClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ConfigurationClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *ConfigurationClient) getCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, options *ConfigurationClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/quotaConfiguration/default" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *ConfigurationClient) getHandleResponse(resp *http.Response) (ConfigurationClientGetResponse, error) { + result := ConfigurationClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ClusterQuotaConfigurationProperties); err != nil { + return ConfigurationClientGetResponse{}, err + } + return result, nil +} + +// Patch - Replace all specified Event Hubs Cluster settings with those contained in the request body. Leaves the settings +// not specified in the request body unmodified. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - clusterName - The name of the Event Hubs Cluster. +// - parameters - Parameters for creating an Event Hubs Cluster resource. +// - options - ConfigurationClientPatchOptions contains the optional parameters for the ConfigurationClient.Patch method. +func (client *ConfigurationClient) Patch(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterQuotaConfigurationProperties, options *ConfigurationClientPatchOptions) (ConfigurationClientPatchResponse, error) { + var err error + const operationName = "ConfigurationClient.Patch" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.patchCreateRequest(ctx, resourceGroupName, clusterName, parameters, options) + if err != nil { + return ConfigurationClientPatchResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ConfigurationClientPatchResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ConfigurationClientPatchResponse{}, err + } + resp, err := client.patchHandleResponse(httpResp) + return resp, err +} + +// patchCreateRequest creates the Patch request. 
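// NOTE (editor): an illustrative sketch, not part of the vendored file. Patch merges
// the supplied settings into the cluster's quota configuration and leaves keys that
// are not mentioned untouched, per the doc comment above. The setting key shown here
// is hypothetical; consult the Event Hubs Dedicated documentation for real keys.
//
//	resp, err := client.Patch(ctx, "example-rg", "example-cluster",
//		ClusterQuotaConfigurationProperties{
//			Settings: map[string]*string{
//				"example-quota-setting": to.Ptr("20"), // hypothetical key and value
//			},
//		}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("%d settings now in effect", len(resp.Settings))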
+func (client *ConfigurationClient) patchCreateRequest(ctx context.Context, resourceGroupName string, clusterName string, parameters ClusterQuotaConfigurationProperties, options *ConfigurationClientPatchOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/quotaConfiguration/default" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// patchHandleResponse handles the Patch response. +func (client *ConfigurationClient) patchHandleResponse(resp *http.Response) (ConfigurationClientPatchResponse, error) { + result := ConfigurationClientPatchResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ClusterQuotaConfigurationProperties); err != nil { + return ConfigurationClientPatchResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/constants.go new file mode 100644 index 00000000000..1a6fbdc9e76 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/constants.go @@ -0,0 +1,367 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +const ( + moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub" + moduleVersion = "v1.2.0" +) + +type AccessRights string + +const ( + AccessRightsListen AccessRights = "Listen" + AccessRightsManage AccessRights = "Manage" + AccessRightsSend AccessRights = "Send" +) + +// PossibleAccessRightsValues returns the possible values for the AccessRights const type. +func PossibleAccessRightsValues() []AccessRights { + return []AccessRights{ + AccessRightsListen, + AccessRightsManage, + AccessRightsSend, + } +} + +// ClusterSKUName - Name of this SKU. +type ClusterSKUName string + +const ( + ClusterSKUNameDedicated ClusterSKUName = "Dedicated" +) + +// PossibleClusterSKUNameValues returns the possible values for the ClusterSKUName const type. 
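// NOTE (editor): an illustrative sketch, not part of the vendored file. The
// Possible*Values helpers defined throughout this file pair each string-backed
// const type with its full value set, which makes validating untrusted input
// straightforward:
//
//	func isValidAccessRight(s string) bool {
//		for _, v := range PossibleAccessRightsValues() {
//			if string(v) == s {
//				return true
//			}
//		}
//		return false
//	}
//
//	isValidAccessRight("Listen") // true
//	isValidAccessRight("Admin")  // false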
+func PossibleClusterSKUNameValues() []ClusterSKUName { + return []ClusterSKUName{ + ClusterSKUNameDedicated, + } +} + +// CreatedByType - The type of identity that created the resource. +type CreatedByType string + +const ( + CreatedByTypeApplication CreatedByType = "Application" + CreatedByTypeKey CreatedByType = "Key" + CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" + CreatedByTypeUser CreatedByType = "User" +) + +// PossibleCreatedByTypeValues returns the possible values for the CreatedByType const type. +func PossibleCreatedByTypeValues() []CreatedByType { + return []CreatedByType{ + CreatedByTypeApplication, + CreatedByTypeKey, + CreatedByTypeManagedIdentity, + CreatedByTypeUser, + } +} + +// DefaultAction - Default Action for Network Rule Set +type DefaultAction string + +const ( + DefaultActionAllow DefaultAction = "Allow" + DefaultActionDeny DefaultAction = "Deny" +) + +// PossibleDefaultActionValues returns the possible values for the DefaultAction const type. +func PossibleDefaultActionValues() []DefaultAction { + return []DefaultAction{ + DefaultActionAllow, + DefaultActionDeny, + } +} + +// EncodingCaptureDescription - Enumerates the possible values for the encoding format of capture description. Note: 'AvroDeflate' +// will be deprecated in New API Version +type EncodingCaptureDescription string + +const ( + EncodingCaptureDescriptionAvro EncodingCaptureDescription = "Avro" + EncodingCaptureDescriptionAvroDeflate EncodingCaptureDescription = "AvroDeflate" +) + +// PossibleEncodingCaptureDescriptionValues returns the possible values for the EncodingCaptureDescription const type. +func PossibleEncodingCaptureDescriptionValues() []EncodingCaptureDescription { + return []EncodingCaptureDescription{ + EncodingCaptureDescriptionAvro, + EncodingCaptureDescriptionAvroDeflate, + } +} + +// EndPointProvisioningState - Provisioning state of the Private Endpoint Connection. +type EndPointProvisioningState string + +const ( + EndPointProvisioningStateCanceled EndPointProvisioningState = "Canceled" + EndPointProvisioningStateCreating EndPointProvisioningState = "Creating" + EndPointProvisioningStateDeleting EndPointProvisioningState = "Deleting" + EndPointProvisioningStateFailed EndPointProvisioningState = "Failed" + EndPointProvisioningStateSucceeded EndPointProvisioningState = "Succeeded" + EndPointProvisioningStateUpdating EndPointProvisioningState = "Updating" +) + +// PossibleEndPointProvisioningStateValues returns the possible values for the EndPointProvisioningState const type. +func PossibleEndPointProvisioningStateValues() []EndPointProvisioningState { + return []EndPointProvisioningState{ + EndPointProvisioningStateCanceled, + EndPointProvisioningStateCreating, + EndPointProvisioningStateDeleting, + EndPointProvisioningStateFailed, + EndPointProvisioningStateSucceeded, + EndPointProvisioningStateUpdating, + } +} + +// EntityStatus - Enumerates the possible values for the status of the Event Hub. 
+type EntityStatus string + +const ( + EntityStatusActive EntityStatus = "Active" + EntityStatusCreating EntityStatus = "Creating" + EntityStatusDeleting EntityStatus = "Deleting" + EntityStatusDisabled EntityStatus = "Disabled" + EntityStatusReceiveDisabled EntityStatus = "ReceiveDisabled" + EntityStatusRenaming EntityStatus = "Renaming" + EntityStatusRestoring EntityStatus = "Restoring" + EntityStatusSendDisabled EntityStatus = "SendDisabled" + EntityStatusUnknown EntityStatus = "Unknown" +) + +// PossibleEntityStatusValues returns the possible values for the EntityStatus const type. +func PossibleEntityStatusValues() []EntityStatus { + return []EntityStatus{ + EntityStatusActive, + EntityStatusCreating, + EntityStatusDeleting, + EntityStatusDisabled, + EntityStatusReceiveDisabled, + EntityStatusRenaming, + EntityStatusRestoring, + EntityStatusSendDisabled, + EntityStatusUnknown, + } +} + +// KeyType - The access key to regenerate. +type KeyType string + +const ( + KeyTypePrimaryKey KeyType = "PrimaryKey" + KeyTypeSecondaryKey KeyType = "SecondaryKey" +) + +// PossibleKeyTypeValues returns the possible values for the KeyType const type. +func PossibleKeyTypeValues() []KeyType { + return []KeyType{ + KeyTypePrimaryKey, + KeyTypeSecondaryKey, + } +} + +// ManagedServiceIdentityType - Type of managed service identity. +type ManagedServiceIdentityType string + +const ( + ManagedServiceIdentityTypeNone ManagedServiceIdentityType = "None" + ManagedServiceIdentityTypeSystemAssigned ManagedServiceIdentityType = "SystemAssigned" + ManagedServiceIdentityTypeSystemAssignedUserAssigned ManagedServiceIdentityType = "SystemAssigned, UserAssigned" + ManagedServiceIdentityTypeUserAssigned ManagedServiceIdentityType = "UserAssigned" +) + +// PossibleManagedServiceIdentityTypeValues returns the possible values for the ManagedServiceIdentityType const type. +func PossibleManagedServiceIdentityTypeValues() []ManagedServiceIdentityType { + return []ManagedServiceIdentityType{ + ManagedServiceIdentityTypeNone, + ManagedServiceIdentityTypeSystemAssigned, + ManagedServiceIdentityTypeSystemAssignedUserAssigned, + ManagedServiceIdentityTypeUserAssigned, + } +} + +// NetworkRuleIPAction - The IP Filter Action +type NetworkRuleIPAction string + +const ( + NetworkRuleIPActionAllow NetworkRuleIPAction = "Allow" +) + +// PossibleNetworkRuleIPActionValues returns the possible values for the NetworkRuleIPAction const type. +func PossibleNetworkRuleIPActionValues() []NetworkRuleIPAction { + return []NetworkRuleIPAction{ + NetworkRuleIPActionAllow, + } +} + +// PrivateLinkConnectionStatus - Status of the connection. +type PrivateLinkConnectionStatus string + +const ( + PrivateLinkConnectionStatusApproved PrivateLinkConnectionStatus = "Approved" + PrivateLinkConnectionStatusDisconnected PrivateLinkConnectionStatus = "Disconnected" + PrivateLinkConnectionStatusPending PrivateLinkConnectionStatus = "Pending" + PrivateLinkConnectionStatusRejected PrivateLinkConnectionStatus = "Rejected" +) + +// PossiblePrivateLinkConnectionStatusValues returns the possible values for the PrivateLinkConnectionStatus const type. 
+func PossiblePrivateLinkConnectionStatusValues() []PrivateLinkConnectionStatus { + return []PrivateLinkConnectionStatus{ + PrivateLinkConnectionStatusApproved, + PrivateLinkConnectionStatusDisconnected, + PrivateLinkConnectionStatusPending, + PrivateLinkConnectionStatusRejected, + } +} + +// ProvisioningStateDR - Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or +// 'Succeeded' or 'Failed' +type ProvisioningStateDR string + +const ( + ProvisioningStateDRAccepted ProvisioningStateDR = "Accepted" + ProvisioningStateDRFailed ProvisioningStateDR = "Failed" + ProvisioningStateDRSucceeded ProvisioningStateDR = "Succeeded" +) + +// PossibleProvisioningStateDRValues returns the possible values for the ProvisioningStateDR const type. +func PossibleProvisioningStateDRValues() []ProvisioningStateDR { + return []ProvisioningStateDR{ + ProvisioningStateDRAccepted, + ProvisioningStateDRFailed, + ProvisioningStateDRSucceeded, + } +} + +// PublicNetworkAccessFlag - This determines if traffic is allowed over public network. By default it is enabled. +type PublicNetworkAccessFlag string + +const ( + PublicNetworkAccessFlagDisabled PublicNetworkAccessFlag = "Disabled" + PublicNetworkAccessFlagEnabled PublicNetworkAccessFlag = "Enabled" +) + +// PossiblePublicNetworkAccessFlagValues returns the possible values for the PublicNetworkAccessFlag const type. +func PossiblePublicNetworkAccessFlagValues() []PublicNetworkAccessFlag { + return []PublicNetworkAccessFlag{ + PublicNetworkAccessFlagDisabled, + PublicNetworkAccessFlagEnabled, + } +} + +// RoleDisasterRecovery - role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary' +type RoleDisasterRecovery string + +const ( + RoleDisasterRecoveryPrimary RoleDisasterRecovery = "Primary" + RoleDisasterRecoveryPrimaryNotReplicating RoleDisasterRecovery = "PrimaryNotReplicating" + RoleDisasterRecoverySecondary RoleDisasterRecovery = "Secondary" +) + +// PossibleRoleDisasterRecoveryValues returns the possible values for the RoleDisasterRecovery const type. +func PossibleRoleDisasterRecoveryValues() []RoleDisasterRecovery { + return []RoleDisasterRecovery{ + RoleDisasterRecoveryPrimary, + RoleDisasterRecoveryPrimaryNotReplicating, + RoleDisasterRecoverySecondary, + } +} + +// SKUName - Name of this SKU. +type SKUName string + +const ( + SKUNameBasic SKUName = "Basic" + SKUNamePremium SKUName = "Premium" + SKUNameStandard SKUName = "Standard" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNameBasic, + SKUNamePremium, + SKUNameStandard, + } +} + +// SKUTier - The billing tier of this particular SKU. +type SKUTier string + +const ( + SKUTierBasic SKUTier = "Basic" + SKUTierPremium SKUTier = "Premium" + SKUTierStandard SKUTier = "Standard" +) + +// PossibleSKUTierValues returns the possible values for the SKUTier const type. +func PossibleSKUTierValues() []SKUTier { + return []SKUTier{ + SKUTierBasic, + SKUTierPremium, + SKUTierStandard, + } +} + +type SchemaCompatibility string + +const ( + SchemaCompatibilityBackward SchemaCompatibility = "Backward" + SchemaCompatibilityForward SchemaCompatibility = "Forward" + SchemaCompatibilityNone SchemaCompatibility = "None" +) + +// PossibleSchemaCompatibilityValues returns the possible values for the SchemaCompatibility const type. 
+func PossibleSchemaCompatibilityValues() []SchemaCompatibility { + return []SchemaCompatibility{ + SchemaCompatibilityBackward, + SchemaCompatibilityForward, + SchemaCompatibilityNone, + } +} + +type SchemaType string + +const ( + SchemaTypeAvro SchemaType = "Avro" + SchemaTypeUnknown SchemaType = "Unknown" +) + +// PossibleSchemaTypeValues returns the possible values for the SchemaType const type. +func PossibleSchemaTypeValues() []SchemaType { + return []SchemaType{ + SchemaTypeAvro, + SchemaTypeUnknown, + } +} + +// UnavailableReason - Specifies the reason for the unavailability of the service. +type UnavailableReason string + +const ( + UnavailableReasonInvalidName UnavailableReason = "InvalidName" + UnavailableReasonNameInLockdown UnavailableReason = "NameInLockdown" + UnavailableReasonNameInUse UnavailableReason = "NameInUse" + UnavailableReasonNone UnavailableReason = "None" + UnavailableReasonSubscriptionIsDisabled UnavailableReason = "SubscriptionIsDisabled" + UnavailableReasonTooManyNamespaceInCurrentSubscription UnavailableReason = "TooManyNamespaceInCurrentSubscription" +) + +// PossibleUnavailableReasonValues returns the possible values for the UnavailableReason const type. +func PossibleUnavailableReasonValues() []UnavailableReason { + return []UnavailableReason{ + UnavailableReasonInvalidName, + UnavailableReasonNameInLockdown, + UnavailableReasonNameInUse, + UnavailableReasonNone, + UnavailableReasonSubscriptionIsDisabled, + UnavailableReasonTooManyNamespaceInCurrentSubscription, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/consumergroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/consumergroups_client.go new file mode 100644 index 00000000000..7a341ee1a0d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/consumergroups_client.go @@ -0,0 +1,340 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// ConsumerGroupsClient contains the methods for the ConsumerGroups group. +// Don't use this type directly, use NewConsumerGroupsClient() instead. +type ConsumerGroupsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewConsumerGroupsClient creates a new instance of ConsumerGroupsClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
+func NewConsumerGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ConsumerGroupsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ConsumerGroupsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrUpdate - Creates or updates an Event Hubs consumer group as a nested resource within a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - consumerGroupName - The consumer group name +// - parameters - Parameters supplied to create or update a consumer group resource. +// - options - ConsumerGroupsClientCreateOrUpdateOptions contains the optional parameters for the ConsumerGroupsClient.CreateOrUpdate +// method. +func (client *ConsumerGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, parameters ConsumerGroup, options *ConsumerGroupsClientCreateOrUpdateOptions) (ConsumerGroupsClientCreateOrUpdateResponse, error) { + var err error + const operationName = "ConsumerGroupsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName, parameters, options) + if err != nil { + return ConsumerGroupsClientCreateOrUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ConsumerGroupsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ConsumerGroupsClientCreateOrUpdateResponse{}, err + } + resp, err := client.createOrUpdateHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
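// NOTE (editor): an illustrative sketch, not part of the vendored file. Consumer
// group creation is a plain synchronous PUT with no poller, as the implementation
// above shows. Resource names are hypothetical and the
// ConsumerGroupProperties.UserMetadata field is assumed from this package's models.
//
//	cg, err := client.CreateOrUpdate(ctx, "example-rg", "example-ns", "example-hub", "example-cg",
//		ConsumerGroup{
//			Properties: &ConsumerGroupProperties{
//				UserMetadata: to.Ptr("owned by the ingest team"),
//			},
//		}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("created consumer group %s", *cg.Name)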
+func (client *ConsumerGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, parameters ConsumerGroup, options *ConsumerGroupsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if consumerGroupName == "" { + return nil, errors.New("parameter consumerGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{consumerGroupName}", url.PathEscape(consumerGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *ConsumerGroupsClient) createOrUpdateHandleResponse(resp *http.Response) (ConsumerGroupsClientCreateOrUpdateResponse, error) { + result := ConsumerGroupsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ConsumerGroup); err != nil { + return ConsumerGroupsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - Deletes a consumer group from the specified Event Hub and resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - consumerGroupName - The consumer group name +// - options - ConsumerGroupsClientDeleteOptions contains the optional parameters for the ConsumerGroupsClient.Delete method. 
+func (client *ConsumerGroupsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, options *ConsumerGroupsClientDeleteOptions) (ConsumerGroupsClientDeleteResponse, error) { + var err error + const operationName = "ConsumerGroupsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName, options) + if err != nil { + return ConsumerGroupsClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ConsumerGroupsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return ConsumerGroupsClientDeleteResponse{}, err + } + return ConsumerGroupsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *ConsumerGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, options *ConsumerGroupsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if consumerGroupName == "" { + return nil, errors.New("parameter consumerGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{consumerGroupName}", url.PathEscape(consumerGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets a description for the specified consumer group. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - consumerGroupName - The consumer group name +// - options - ConsumerGroupsClientGetOptions contains the optional parameters for the ConsumerGroupsClient.Get method. 
+func (client *ConsumerGroupsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, options *ConsumerGroupsClientGetOptions) (ConsumerGroupsClientGetResponse, error) { + var err error + const operationName = "ConsumerGroupsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName, options) + if err != nil { + return ConsumerGroupsClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ConsumerGroupsClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ConsumerGroupsClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *ConsumerGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, options *ConsumerGroupsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if consumerGroupName == "" { + return nil, errors.New("parameter consumerGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{consumerGroupName}", url.PathEscape(consumerGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *ConsumerGroupsClient) getHandleResponse(resp *http.Response) (ConsumerGroupsClientGetResponse, error) { + result := ConsumerGroupsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ConsumerGroup); err != nil { + return ConsumerGroupsClientGetResponse{}, err + } + return result, nil +} + +// NewListByEventHubPager - Gets all the consumer groups in a Namespace. An empty feed is returned if no consumer group exists +// in the Namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. 
+// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - options - ConsumerGroupsClientListByEventHubOptions contains the optional parameters for the ConsumerGroupsClient.NewListByEventHubPager +// method. +func (client *ConsumerGroupsClient) NewListByEventHubPager(resourceGroupName string, namespaceName string, eventHubName string, options *ConsumerGroupsClientListByEventHubOptions) *runtime.Pager[ConsumerGroupsClientListByEventHubResponse] { + return runtime.NewPager(runtime.PagingHandler[ConsumerGroupsClientListByEventHubResponse]{ + More: func(page ConsumerGroupsClientListByEventHubResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ConsumerGroupsClientListByEventHubResponse) (ConsumerGroupsClientListByEventHubResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ConsumerGroupsClient.NewListByEventHubPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByEventHubCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options) + }, nil) + if err != nil { + return ConsumerGroupsClientListByEventHubResponse{}, err + } + return client.listByEventHubHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listByEventHubCreateRequest creates the ListByEventHub request. +func (client *ConsumerGroupsClient) listByEventHubCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *ConsumerGroupsClientListByEventHubOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", strconv.FormatInt(int64(*options.Skip), 10)) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByEventHubHandleResponse handles the ListByEventHub response. 
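// NOTE (editor): an illustrative sketch, not part of the vendored file. Unlike the
// Clusters pagers earlier in this diff, ListByEventHub supports server-side
// $skip/$top, wired up in listByEventHubCreateRequest above. Names are hypothetical.
//
//	opts := &ConsumerGroupsClientListByEventHubOptions{
//		Skip: to.Ptr[int32](0),
//		Top:  to.Ptr[int32](100), // ask the service for pages of 100
//	}
//	pager := client.NewListByEventHubPager("example-rg", "example-ns", "example-hub", opts)
//	for pager.More() {
//		page, err := pager.NextPage(ctx)
//		if err != nil {
//			log.Fatal(err)
//		}
//		for _, cg := range page.Value {
//			log.Printf("consumer group: %s", *cg.Name)
//		}
//	}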
+func (client *ConsumerGroupsClient) listByEventHubHandleResponse(resp *http.Response) (ConsumerGroupsClientListByEventHubResponse, error) { + result := ConsumerGroupsClientListByEventHubResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ConsumerGroupListResult); err != nil { + return ConsumerGroupsClientListByEventHubResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_client.go new file mode 100644 index 00000000000..0f7036a3c88 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_client.go @@ -0,0 +1,723 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DisasterRecoveryConfigsClient contains the methods for the DisasterRecoveryConfigs group. +// Don't use this type directly, use NewDisasterRecoveryConfigsClient() instead. +type DisasterRecoveryConfigsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewDisasterRecoveryConfigsClient creates a new instance of DisasterRecoveryConfigsClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewDisasterRecoveryConfigsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DisasterRecoveryConfigsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &DisasterRecoveryConfigsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BreakPairing - This operation disables the Disaster Recovery and stops replicating changes from primary to secondary namespaces +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - options - DisasterRecoveryConfigsClientBreakPairingOptions contains the optional parameters for the DisasterRecoveryConfigsClient.BreakPairing +// method. 
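As a usage sketch for the method documented above and defined just below (illustrative only; it reuses the imports and client-construction pattern from the earlier consumer-groups example, with NewDisasterRecoveryConfigsClient instead):

// Assumes client is a *armeventhub.DisasterRecoveryConfigsClient and the
// names are placeholders. BreakPairing returns an empty response struct,
// so only the error is meaningful to the caller.
if _, err := client.BreakPairing(context.TODO(), "<resource-group>", "<namespace>", "<alias>", nil); err != nil {
	log.Fatalf("BreakPairing failed: %v", err)
}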
+func (client *DisasterRecoveryConfigsClient) BreakPairing(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientBreakPairingOptions) (DisasterRecoveryConfigsClientBreakPairingResponse, error) {
+ var err error
+ const operationName = "DisasterRecoveryConfigsClient.BreakPairing"
+ ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+ ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ req, err := client.breakPairingCreateRequest(ctx, resourceGroupName, namespaceName, alias, options)
+ if err != nil {
+ return DisasterRecoveryConfigsClientBreakPairingResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return DisasterRecoveryConfigsClientBreakPairingResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+ err = runtime.NewResponseError(httpResp)
+ return DisasterRecoveryConfigsClientBreakPairingResponse{}, err
+ }
+ return DisasterRecoveryConfigsClientBreakPairingResponse{}, nil
+}
+
+// breakPairingCreateRequest creates the BreakPairing request.
+func (client *DisasterRecoveryConfigsClient) breakPairingCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientBreakPairingOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/breakPairing"
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if namespaceName == "" {
+ return nil, errors.New("parameter namespaceName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName))
+ if alias == "" {
+ return nil, errors.New("parameter alias cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias))
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-11-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// CheckNameAvailability - Checks the given Namespace name availability.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+// - resourceGroupName - Name of the resource group within the azure subscription.
+// - namespaceName - The Namespace name
+// - parameters - Parameters to check availability of the given Alias name
+// - options - DisasterRecoveryConfigsClientCheckNameAvailabilityOptions contains the optional parameters for the DisasterRecoveryConfigsClient.CheckNameAvailability
+// method.
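A matching sketch for CheckNameAvailability, documented above and defined just below; it additionally assumes the github.com/Azure/azure-sdk-for-go/sdk/azcore/to helper package for pointer construction, and every name is a placeholder.

// Assumes client is a *armeventhub.DisasterRecoveryConfigsClient.
params := armeventhub.CheckNameAvailabilityParameter{Name: to.Ptr("<alias>")}
resp, err := client.CheckNameAvailability(context.TODO(), "<resource-group>", "<namespace>", params, nil)
if err != nil {
	log.Fatal(err)
}
if resp.NameAvailable != nil && *resp.NameAvailable {
	fmt.Println("alias name is available")
}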
+func (client *DisasterRecoveryConfigsClient) CheckNameAvailability(ctx context.Context, resourceGroupName string, namespaceName string, parameters CheckNameAvailabilityParameter, options *DisasterRecoveryConfigsClientCheckNameAvailabilityOptions) (DisasterRecoveryConfigsClientCheckNameAvailabilityResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.CheckNameAvailability" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.checkNameAvailabilityCreateRequest(ctx, resourceGroupName, namespaceName, parameters, options) + if err != nil { + return DisasterRecoveryConfigsClientCheckNameAvailabilityResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientCheckNameAvailabilityResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientCheckNameAvailabilityResponse{}, err + } + resp, err := client.checkNameAvailabilityHandleResponse(httpResp) + return resp, err +} + +// checkNameAvailabilityCreateRequest creates the CheckNameAvailability request. +func (client *DisasterRecoveryConfigsClient) checkNameAvailabilityCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, parameters CheckNameAvailabilityParameter, options *DisasterRecoveryConfigsClientCheckNameAvailabilityOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/checkNameAvailability" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// checkNameAvailabilityHandleResponse handles the CheckNameAvailability response. +func (client *DisasterRecoveryConfigsClient) checkNameAvailabilityHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientCheckNameAvailabilityResponse, error) { + result := DisasterRecoveryConfigsClientCheckNameAvailabilityResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityResult); err != nil { + return DisasterRecoveryConfigsClientCheckNameAvailabilityResponse{}, err + } + return result, nil +} + +// CreateOrUpdate - Creates or updates a new Alias(Disaster Recovery configuration) +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - parameters - Parameters required to create an Alias(Disaster Recovery configuration) +// - options - DisasterRecoveryConfigsClientCreateOrUpdateOptions contains the optional parameters for the DisasterRecoveryConfigsClient.CreateOrUpdate +// method. +func (client *DisasterRecoveryConfigsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, alias string, parameters ArmDisasterRecovery, options *DisasterRecoveryConfigsClientCreateOrUpdateOptions) (DisasterRecoveryConfigsClientCreateOrUpdateResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, alias, parameters, options) + if err != nil { + return DisasterRecoveryConfigsClientCreateOrUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientCreateOrUpdateResponse{}, err + } + resp, err := client.createOrUpdateHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *DisasterRecoveryConfigsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, parameters ArmDisasterRecovery, options *DisasterRecoveryConfigsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. 
+func (client *DisasterRecoveryConfigsClient) createOrUpdateHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientCreateOrUpdateResponse, error) { + result := DisasterRecoveryConfigsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ArmDisasterRecovery); err != nil { + return DisasterRecoveryConfigsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - Deletes an Alias(Disaster Recovery configuration) +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - options - DisasterRecoveryConfigsClientDeleteOptions contains the optional parameters for the DisasterRecoveryConfigsClient.Delete +// method. +func (client *DisasterRecoveryConfigsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientDeleteOptions) (DisasterRecoveryConfigsClientDeleteResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, alias, options) + if err != nil { + return DisasterRecoveryConfigsClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientDeleteResponse{}, err + } + return DisasterRecoveryConfigsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *DisasterRecoveryConfigsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientDeleteOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}"
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if namespaceName == "" {
+ return nil, errors.New("parameter namespaceName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName))
+ if alias == "" {
+ return nil, errors.New("parameter alias cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias))
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-11-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// FailOver - Invokes GEO DR failover and reconfigures the alias to point to the secondary namespace
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+// - resourceGroupName - Name of the resource group within the azure subscription.
+// - namespaceName - The Namespace name
+// - alias - The Disaster Recovery configuration name
+// - options - DisasterRecoveryConfigsClientFailOverOptions contains the optional parameters for the DisasterRecoveryConfigsClient.FailOver
+// method.
+func (client *DisasterRecoveryConfigsClient) FailOver(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientFailOverOptions) (DisasterRecoveryConfigsClientFailOverResponse, error) {
+ var err error
+ const operationName = "DisasterRecoveryConfigsClient.FailOver"
+ ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+ ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+ defer func() { endSpan(err) }()
+ req, err := client.failOverCreateRequest(ctx, resourceGroupName, namespaceName, alias, options)
+ if err != nil {
+ return DisasterRecoveryConfigsClientFailOverResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return DisasterRecoveryConfigsClientFailOverResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+ err = runtime.NewResponseError(httpResp)
+ return DisasterRecoveryConfigsClientFailOverResponse{}, err
+ }
+ return DisasterRecoveryConfigsClientFailOverResponse{}, nil
+}
+
+// failOverCreateRequest creates the FailOver request.
+func (client *DisasterRecoveryConfigsClient) failOverCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientFailOverOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/failover" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves Alias(Disaster Recovery configuration) for primary or secondary namespace +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - options - DisasterRecoveryConfigsClientGetOptions contains the optional parameters for the DisasterRecoveryConfigsClient.Get +// method. +func (client *DisasterRecoveryConfigsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientGetOptions) (DisasterRecoveryConfigsClientGetResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, alias, options) + if err != nil { + return DisasterRecoveryConfigsClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. 
+func (client *DisasterRecoveryConfigsClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *DisasterRecoveryConfigsClient) getHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientGetResponse, error) { + result := DisasterRecoveryConfigsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ArmDisasterRecovery); err != nil { + return DisasterRecoveryConfigsClientGetResponse{}, err + } + return result, nil +} + +// GetAuthorizationRule - Gets an AuthorizationRule for a Namespace by rule name. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - authorizationRuleName - The authorization rule name. +// - options - DisasterRecoveryConfigsClientGetAuthorizationRuleOptions contains the optional parameters for the DisasterRecoveryConfigsClient.GetAuthorizationRule +// method. 
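Before the GetAuthorizationRule body below, a brief sketch of the Get round trip implemented above; illustrative only, with the Properties and Role field names taken from this package's generated models and all resource names placeholders.

// Assumes client is a *armeventhub.DisasterRecoveryConfigsClient.
resp, err := client.Get(context.TODO(), "<resource-group>", "<namespace>", "<alias>", nil)
if err != nil {
	log.Fatal(err)
}
// Role reports whether this side of the pairing is Primary, PrimaryNotReplicating or Secondary.
if resp.Properties != nil && resp.Properties.Role != nil {
	fmt.Println("replication role:", *resp.Properties.Role)
}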
+func (client *DisasterRecoveryConfigsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string, options *DisasterRecoveryConfigsClientGetAuthorizationRuleOptions) (DisasterRecoveryConfigsClientGetAuthorizationRuleResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.GetAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, alias, authorizationRuleName, options) + if err != nil { + return DisasterRecoveryConfigsClientGetAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientGetAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientGetAuthorizationRuleResponse{}, err + } + resp, err := client.getAuthorizationRuleHandleResponse(httpResp) + return resp, err +} + +// getAuthorizationRuleCreateRequest creates the GetAuthorizationRule request. +func (client *DisasterRecoveryConfigsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string, options *DisasterRecoveryConfigsClientGetAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getAuthorizationRuleHandleResponse handles the GetAuthorizationRule response. 
+func (client *DisasterRecoveryConfigsClient) getAuthorizationRuleHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientGetAuthorizationRuleResponse, error) { + result := DisasterRecoveryConfigsClientGetAuthorizationRuleResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil { + return DisasterRecoveryConfigsClientGetAuthorizationRuleResponse{}, err + } + return result, nil +} + +// NewListPager - Gets all Alias(Disaster Recovery configurations) +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - DisasterRecoveryConfigsClientListOptions contains the optional parameters for the DisasterRecoveryConfigsClient.NewListPager +// method. +func (client *DisasterRecoveryConfigsClient) NewListPager(resourceGroupName string, namespaceName string, options *DisasterRecoveryConfigsClientListOptions) *runtime.Pager[DisasterRecoveryConfigsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[DisasterRecoveryConfigsClientListResponse]{ + More: func(page DisasterRecoveryConfigsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DisasterRecoveryConfigsClientListResponse) (DisasterRecoveryConfigsClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DisasterRecoveryConfigsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, namespaceName, options) + }, nil) + if err != nil { + return DisasterRecoveryConfigsClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. +func (client *DisasterRecoveryConfigsClient) listCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *DisasterRecoveryConfigsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *DisasterRecoveryConfigsClient) listHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientListResponse, error) { + result := DisasterRecoveryConfigsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ArmDisasterRecoveryListResult); err != nil { + return DisasterRecoveryConfigsClientListResponse{}, err + } + return result, nil +} + +// NewListAuthorizationRulesPager - Gets a list of authorization rules for a Namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - options - DisasterRecoveryConfigsClientListAuthorizationRulesOptions contains the optional parameters for the DisasterRecoveryConfigsClient.NewListAuthorizationRulesPager +// method. +func (client *DisasterRecoveryConfigsClient) NewListAuthorizationRulesPager(resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientListAuthorizationRulesOptions) *runtime.Pager[DisasterRecoveryConfigsClientListAuthorizationRulesResponse] { + return runtime.NewPager(runtime.PagingHandler[DisasterRecoveryConfigsClientListAuthorizationRulesResponse]{ + More: func(page DisasterRecoveryConfigsClientListAuthorizationRulesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DisasterRecoveryConfigsClientListAuthorizationRulesResponse) (DisasterRecoveryConfigsClientListAuthorizationRulesResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "DisasterRecoveryConfigsClient.NewListAuthorizationRulesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAuthorizationRulesCreateRequest(ctx, resourceGroupName, namespaceName, alias, options) + }, nil) + if err != nil { + return DisasterRecoveryConfigsClientListAuthorizationRulesResponse{}, err + } + return client.listAuthorizationRulesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listAuthorizationRulesCreateRequest creates the ListAuthorizationRules request. 
+func (client *DisasterRecoveryConfigsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, options *DisasterRecoveryConfigsClientListAuthorizationRulesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAuthorizationRulesHandleResponse handles the ListAuthorizationRules response. +func (client *DisasterRecoveryConfigsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientListAuthorizationRulesResponse, error) { + result := DisasterRecoveryConfigsClientListAuthorizationRulesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil { + return DisasterRecoveryConfigsClientListAuthorizationRulesResponse{}, err + } + return result, nil +} + +// ListKeys - Gets the primary and secondary connection strings for the Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - alias - The Disaster Recovery configuration name +// - authorizationRuleName - The authorization rule name. +// - options - DisasterRecoveryConfigsClientListKeysOptions contains the optional parameters for the DisasterRecoveryConfigsClient.ListKeys +// method. 
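A hedged sketch of the ListKeys call documented above and defined just below; RootManageSharedAccessKey is only the conventional default rule name, not something this file guarantees, and the other names are placeholders.

// Assumes client is a *armeventhub.DisasterRecoveryConfigsClient.
keys, err := client.ListKeys(context.TODO(), "<resource-group>", "<namespace>", "<alias>", "RootManageSharedAccessKey", nil)
if err != nil {
	log.Fatal(err)
}
// The response embeds AccessKeys; the alias-scoped connection string is the
// one intended to remain usable across a failover.
if keys.AliasPrimaryConnectionString != nil {
	fmt.Println(*keys.AliasPrimaryConnectionString)
}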
+func (client *DisasterRecoveryConfigsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string, options *DisasterRecoveryConfigsClientListKeysOptions) (DisasterRecoveryConfigsClientListKeysResponse, error) { + var err error + const operationName = "DisasterRecoveryConfigsClient.ListKeys" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listKeysCreateRequest(ctx, resourceGroupName, namespaceName, alias, authorizationRuleName, options) + if err != nil { + return DisasterRecoveryConfigsClientListKeysResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DisasterRecoveryConfigsClientListKeysResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DisasterRecoveryConfigsClientListKeysResponse{}, err + } + resp, err := client.listKeysHandleResponse(httpResp) + return resp, err +} + +// listKeysCreateRequest creates the ListKeys request. +func (client *DisasterRecoveryConfigsClient) listKeysCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string, options *DisasterRecoveryConfigsClientListKeysOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}/listKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if alias == "" { + return nil, errors.New("parameter alias cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{alias}", url.PathEscape(alias)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeysHandleResponse handles the ListKeys response. 
+func (client *DisasterRecoveryConfigsClient) listKeysHandleResponse(resp *http.Response) (DisasterRecoveryConfigsClientListKeysResponse, error) { + result := DisasterRecoveryConfigsClientListKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil { + return DisasterRecoveryConfigsClientListKeysResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/eventhubs_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/eventhubs_client.go new file mode 100644 index 00000000000..bd643f90504 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/eventhubs_client.go @@ -0,0 +1,762 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// EventHubsClient contains the methods for the EventHubs group. +// Don't use this type directly, use NewEventHubsClient() instead. +type EventHubsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewEventHubsClient creates a new instance of EventHubsClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewEventHubsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*EventHubsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &EventHubsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrUpdate - Creates or updates a new Event Hub as a nested resource within a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - parameters - Parameters supplied to create an Event Hub resource. +// - options - EventHubsClientCreateOrUpdateOptions contains the optional parameters for the EventHubsClient.CreateOrUpdate +// method. 
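For the CreateOrUpdate method documented above and defined just below, a minimal sketch; the Properties field names come from this package's generated models, to.Ptr is the azcore/to helper, and the counts are arbitrary illustrative values.

// Assumes client is a *armeventhub.EventHubsClient; names are placeholders.
hub := armeventhub.Eventhub{
	Properties: &armeventhub.Properties{
		PartitionCount:         to.Ptr[int64](4),
		MessageRetentionInDays: to.Ptr[int64](1),
	},
}
resp, err := client.CreateOrUpdate(context.TODO(), "<resource-group>", "<namespace>", "<event-hub>", hub, nil)
if err != nil {
	log.Fatal(err)
}
if resp.Name != nil {
	fmt.Println("created or updated:", *resp.Name)
}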
+func (client *EventHubsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, parameters Eventhub, options *EventHubsClientCreateOrUpdateOptions) (EventHubsClientCreateOrUpdateResponse, error) { + var err error + const operationName = "EventHubsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, parameters, options) + if err != nil { + return EventHubsClientCreateOrUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientCreateOrUpdateResponse{}, err + } + resp, err := client.createOrUpdateHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *EventHubsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, parameters Eventhub, options *EventHubsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *EventHubsClient) createOrUpdateHandleResponse(resp *http.Response) (EventHubsClientCreateOrUpdateResponse, error) { + result := EventHubsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Eventhub); err != nil { + return EventHubsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// CreateOrUpdateAuthorizationRule - Creates or updates an AuthorizationRule for the specified Event Hub. Creation/update +// of the AuthorizationRule will take a few seconds to take effect. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - authorizationRuleName - The authorization rule name. +// - parameters - The shared access AuthorizationRule. +// - options - EventHubsClientCreateOrUpdateAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.CreateOrUpdateAuthorizationRule +// method. +func (client *EventHubsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (EventHubsClientCreateOrUpdateAuthorizationRuleResponse, error) { + var err error + const operationName = "EventHubsClient.CreateOrUpdateAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, parameters, options) + if err != nil { + return EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + resp, err := client.createOrUpdateAuthorizationRuleHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateAuthorizationRuleCreateRequest creates the CreateOrUpdateAuthorizationRule request. 
+func (client *EventHubsClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule, options *EventHubsClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateAuthorizationRuleHandleResponse handles the CreateOrUpdateAuthorizationRule response. +func (client *EventHubsClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientCreateOrUpdateAuthorizationRuleResponse, error) { + result := EventHubsClientCreateOrUpdateAuthorizationRuleResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil { + return EventHubsClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + return result, nil +} + +// Delete - Deletes an Event Hub from the specified Namespace and resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - options - EventHubsClientDeleteOptions contains the optional parameters for the EventHubsClient.Delete method. 
+func (client *EventHubsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientDeleteOptions) (EventHubsClientDeleteResponse, error) { + var err error + const operationName = "EventHubsClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options) + if err != nil { + return EventHubsClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientDeleteResponse{}, err + } + return EventHubsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *EventHubsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// DeleteAuthorizationRule - Deletes an Event Hub AuthorizationRule. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - authorizationRuleName - The authorization rule name. +// - options - EventHubsClientDeleteAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.DeleteAuthorizationRule +// method. 
+func (client *EventHubsClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientDeleteAuthorizationRuleOptions) (EventHubsClientDeleteAuthorizationRuleResponse, error) { + var err error + const operationName = "EventHubsClient.DeleteAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, options) + if err != nil { + return EventHubsClientDeleteAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientDeleteAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientDeleteAuthorizationRuleResponse{}, err + } + return EventHubsClientDeleteAuthorizationRuleResponse{}, nil +} + +// deleteAuthorizationRuleCreateRequest creates the DeleteAuthorizationRule request. +func (client *EventHubsClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientDeleteAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets an Event Hubs description for the specified Event Hub. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. 
+// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - options - EventHubsClientGetOptions contains the optional parameters for the EventHubsClient.Get method. +func (client *EventHubsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientGetOptions) (EventHubsClientGetResponse, error) { + var err error + const operationName = "EventHubsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options) + if err != nil { + return EventHubsClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *EventHubsClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *EventHubsClient) getHandleResponse(resp *http.Response) (EventHubsClientGetResponse, error) { + result := EventHubsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Eventhub); err != nil { + return EventHubsClientGetResponse{}, err + } + return result, nil +} + +// GetAuthorizationRule - Gets an AuthorizationRule for an Event Hub by rule name. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - authorizationRuleName - The authorization rule name. 
+// - options - EventHubsClientGetAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.GetAuthorizationRule +// method. +func (client *EventHubsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (EventHubsClientGetAuthorizationRuleResponse, error) { + var err error + const operationName = "EventHubsClient.GetAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, options) + if err != nil { + return EventHubsClientGetAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientGetAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientGetAuthorizationRuleResponse{}, err + } + resp, err := client.getAuthorizationRuleHandleResponse(httpResp) + return resp, err +} + +// getAuthorizationRuleCreateRequest creates the GetAuthorizationRule request. +func (client *EventHubsClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientGetAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getAuthorizationRuleHandleResponse handles the GetAuthorizationRule response. 
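+// On success the JSON body is unmarshalled into result.AuthorizationRule via
+// runtime.UnmarshalAsJSON.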
+func (client *EventHubsClient) getAuthorizationRuleHandleResponse(resp *http.Response) (EventHubsClientGetAuthorizationRuleResponse, error) { + result := EventHubsClientGetAuthorizationRuleResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil { + return EventHubsClientGetAuthorizationRuleResponse{}, err + } + return result, nil +} + +// NewListAuthorizationRulesPager - Gets the authorization rules for an Event Hub. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - options - EventHubsClientListAuthorizationRulesOptions contains the optional parameters for the EventHubsClient.NewListAuthorizationRulesPager +// method. +func (client *EventHubsClient) NewListAuthorizationRulesPager(resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) *runtime.Pager[EventHubsClientListAuthorizationRulesResponse] { + return runtime.NewPager(runtime.PagingHandler[EventHubsClientListAuthorizationRulesResponse]{ + More: func(page EventHubsClientListAuthorizationRulesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *EventHubsClientListAuthorizationRulesResponse) (EventHubsClientListAuthorizationRulesResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "EventHubsClient.NewListAuthorizationRulesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAuthorizationRulesCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, options) + }, nil) + if err != nil { + return EventHubsClientListAuthorizationRulesResponse{}, err + } + return client.listAuthorizationRulesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listAuthorizationRulesCreateRequest creates the ListAuthorizationRules request. 
+func (client *EventHubsClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, options *EventHubsClientListAuthorizationRulesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAuthorizationRulesHandleResponse handles the ListAuthorizationRules response. +func (client *EventHubsClient) listAuthorizationRulesHandleResponse(resp *http.Response) (EventHubsClientListAuthorizationRulesResponse, error) { + result := EventHubsClientListAuthorizationRulesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil { + return EventHubsClientListAuthorizationRulesResponse{}, err + } + return result, nil +} + +// NewListByNamespacePager - Gets all the Event Hubs in a Namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - EventHubsClientListByNamespaceOptions contains the optional parameters for the EventHubsClient.NewListByNamespacePager +// method. 
+func (client *EventHubsClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) *runtime.Pager[EventHubsClientListByNamespaceResponse] { + return runtime.NewPager(runtime.PagingHandler[EventHubsClientListByNamespaceResponse]{ + More: func(page EventHubsClientListByNamespaceResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *EventHubsClientListByNamespaceResponse) (EventHubsClientListByNamespaceResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "EventHubsClient.NewListByNamespacePager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options) + }, nil) + if err != nil { + return EventHubsClientListByNamespaceResponse{}, err + } + return client.listByNamespaceHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listByNamespaceCreateRequest creates the ListByNamespace request. +func (client *EventHubsClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *EventHubsClientListByNamespaceOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", strconv.FormatInt(int64(*options.Skip), 10)) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByNamespaceHandleResponse handles the ListByNamespace response. +func (client *EventHubsClient) listByNamespaceHandleResponse(resp *http.Response) (EventHubsClientListByNamespaceResponse, error) { + result := EventHubsClientListByNamespaceResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil { + return EventHubsClientListByNamespaceResponse{}, err + } + return result, nil +} + +// ListKeys - Gets the ACS and SAS connection strings for the Event Hub. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. 
+// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - authorizationRuleName - The authorization rule name. +// - options - EventHubsClientListKeysOptions contains the optional parameters for the EventHubsClient.ListKeys method. +func (client *EventHubsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientListKeysOptions) (EventHubsClientListKeysResponse, error) { + var err error + const operationName = "EventHubsClient.ListKeys" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listKeysCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, options) + if err != nil { + return EventHubsClientListKeysResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientListKeysResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientListKeysResponse{}, err + } + resp, err := client.listKeysHandleResponse(httpResp) + return resp, err +} + +// listKeysCreateRequest creates the ListKeys request. +func (client *EventHubsClient) listKeysCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, options *EventHubsClientListKeysOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}/listKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeysHandleResponse handles the ListKeys response. 
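+// The AccessKeys payload, including the primary and secondary connection
+// strings, is decoded from the response body. A minimal sketch of the public
+// ListKeys call (the resource and rule names are illustrative):
+//
+//	resp, err := client.ListKeys(context.TODO(), "my-rg", "my-ns", "my-hub", "my-rule", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.PrimaryConnectionString // *string, promoted from the embedded AccessKeys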
+func (client *EventHubsClient) listKeysHandleResponse(resp *http.Response) (EventHubsClientListKeysResponse, error) { + result := EventHubsClientListKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil { + return EventHubsClientListKeysResponse{}, err + } + return result, nil +} + +// RegenerateKeys - Regenerates the ACS and SAS connection strings for the Event Hub. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - eventHubName - The Event Hub name +// - authorizationRuleName - The authorization rule name. +// - parameters - Parameters supplied to regenerate the AuthorizationRule Keys (PrimaryKey/SecondaryKey). +// - options - EventHubsClientRegenerateKeysOptions contains the optional parameters for the EventHubsClient.RegenerateKeys +// method. +func (client *EventHubsClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters, options *EventHubsClientRegenerateKeysOptions) (EventHubsClientRegenerateKeysResponse, error) { + var err error + const operationName = "EventHubsClient.RegenerateKeys" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.regenerateKeysCreateRequest(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, parameters, options) + if err != nil { + return EventHubsClientRegenerateKeysResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return EventHubsClientRegenerateKeysResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return EventHubsClientRegenerateKeysResponse{}, err + } + resp, err := client.regenerateKeysHandleResponse(httpResp) + return resp, err +} + +// regenerateKeysCreateRequest creates the RegenerateKeys request. 
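+// The RegenerateAccessKeyParameters value is marshalled as JSON onto the POST
+// body via runtime.MarshalAsJSON.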
+func (client *EventHubsClient) regenerateKeysCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters, options *EventHubsClientRegenerateKeysOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}/regenerateKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if eventHubName == "" { + return nil, errors.New("parameter eventHubName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{eventHubName}", url.PathEscape(eventHubName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// regenerateKeysHandleResponse handles the RegenerateKeys response. +func (client *EventHubsClient) regenerateKeysHandleResponse(resp *http.Response) (EventHubsClientRegenerateKeysResponse, error) { + result := EventHubsClientRegenerateKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil { + return EventHubsClientRegenerateKeysResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models.go new file mode 100644 index 00000000000..6eb9f8dd8c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models.go @@ -0,0 +1,902 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import "time" + +// AccessKeys - Namespace/EventHub Connection String +type AccessKeys struct { + // READ-ONLY; Primary connection string of the alias if GEO DR is enabled + AliasPrimaryConnectionString *string + + // READ-ONLY; Secondary connection string of the alias if GEO DR is enabled + AliasSecondaryConnectionString *string + + // READ-ONLY; A string that describes the AuthorizationRule. 
+	KeyName *string
+
+	// READ-ONLY; Primary connection string of the created namespace AuthorizationRule.
+	PrimaryConnectionString *string
+
+	// READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token.
+	PrimaryKey *string
+
+	// READ-ONLY; Secondary connection string of the created namespace AuthorizationRule.
+	SecondaryConnectionString *string
+
+	// READ-ONLY; A base64-encoded 256-bit secondary key for signing and validating the SAS token.
+	SecondaryKey *string
+}
+
+// ArmDisasterRecovery - Single item in List or Get Alias(Disaster Recovery configuration) operation
+type ArmDisasterRecovery struct {
+	// Properties required for the Create Or Update Alias(Disaster Recovery configurations)
+	Properties *ArmDisasterRecoveryProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// ArmDisasterRecoveryListResult - The result of the List Alias(Disaster Recovery configuration) operation.
+type ArmDisasterRecoveryListResult struct {
+	// List of Alias(Disaster Recovery configurations)
+	Value []*ArmDisasterRecovery
+
+	// READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of Alias(Disaster Recovery configuration)
+	NextLink *string
+}
+
+// ArmDisasterRecoveryProperties - Properties required for the Create Or Update Alias(Disaster Recovery configurations)
+type ArmDisasterRecoveryProperties struct {
+	// Alternate name specified when alias and namespace names are same.
+	AlternateName *string
+
+	// ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairing
+	PartnerNamespace *string
+
+	// READ-ONLY; Number of entities pending to be replicated.
+	PendingReplicationOperationsCount *int64
+
+	// READ-ONLY; Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or 'Succeeded'
+	// or 'Failed'
+	ProvisioningState *ProvisioningStateDR
+
+	// READ-ONLY; role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'
+	Role *RoleDisasterRecovery
+}
+
+// AuthorizationRule - Single item in a List or Get AuthorizationRule operation
+type AuthorizationRule struct {
+	// Properties supplied to create or update AuthorizationRule
+	Properties *AuthorizationRuleProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// AuthorizationRuleListResult - The response from the List AuthorizationRule operation.
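+// A non-empty NextLink is what drives NewListAuthorizationRulesPager to fetch
+// the next page of results.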
+type AuthorizationRuleListResult struct {
+	// Link to the next set of results. Not empty if Value contains an incomplete list of Authorization Rules
+	NextLink *string
+
+	// Result of the List Authorization Rules operation.
+	Value []*AuthorizationRule
+}
+
+// AuthorizationRuleProperties - Properties supplied to create or update AuthorizationRule
+type AuthorizationRuleProperties struct {
+	// REQUIRED; The rights associated with the rule.
+	Rights []*AccessRights
+}
+
+// AvailableCluster - Pre-provisioned and readily available Event Hubs Cluster count per region.
+type AvailableCluster struct {
+	// Location of the Available Cluster
+	Location *string
+}
+
+// AvailableClustersList - The response of the List Available Clusters operation.
+type AvailableClustersList struct {
+	// The count of readily available and pre-provisioned Event Hubs Clusters per region.
+	Value []*AvailableCluster
+}
+
+// CaptureDescription - Properties to configure capture description for eventhub
+type CaptureDescription struct {
+	// Properties of Destination where capture will be stored. (Storage Account, Blob Names)
+	Destination *Destination
+
+	// A value that indicates whether capture description is enabled.
+	Enabled *bool
+
+	// Enumerates the possible values for the encoding format of capture description. Note: 'AvroDeflate' will be deprecated in
+	// New API Version
+	Encoding *EncodingCaptureDescription
+
+	// The time window allows you to set the frequency with which the capture to Azure Blobs will happen, value should be between
+	// 60 and 900 seconds
+	IntervalInSeconds *int32
+
+	// The size window defines the amount of data built up in your Event Hub before a capture operation, value should be between
+	// 10485760 and 524288000 bytes
+	SizeLimitInBytes *int32
+
+	// A value that indicates whether to Skip Empty Archives
+	SkipEmptyArchives *bool
+}
+
+// CheckNameAvailabilityParameter - Parameter supplied to check Namespace name availability operation
+type CheckNameAvailabilityParameter struct {
+	// REQUIRED; Name to check the namespace name availability
+	Name *string
+}
+
+// CheckNameAvailabilityResult - The Result of the CheckNameAvailability operation
+type CheckNameAvailabilityResult struct {
+	// Value indicating Namespace availability: true if the Namespace is available; otherwise, false.
+	NameAvailable *bool
+
+	// The reason for unavailability of a Namespace.
+	Reason *UnavailableReason
+
+	// READ-ONLY; The detailed info regarding the reason associated with the Namespace.
+	Message *string
+}
+
+// Cluster - Single Event Hubs Cluster resource in List or Get operations.
+type Cluster struct {
+	// Resource location.
+	Location *string
+
+	// Event Hubs Cluster properties supplied in responses in List or Get operations.
+	Properties *ClusterProperties
+
+	// Properties of the cluster SKU.
+	SKU *ClusterSKU
+
+	// Resource tags.
+	Tags map[string]*string
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+	Type *string
+}
+
+// ClusterListResult - The response of the List Event Hubs Clusters operation.
+type ClusterListResult struct {
+	// Link to the next set of results. Empty unless the value parameter contains an incomplete list of Event Hubs Clusters.
+	NextLink *string
+
+	// The Event Hubs Clusters present in the List Event Hubs operation results.
+	Value []*Cluster
+}
+
+// ClusterProperties - Event Hubs Cluster properties supplied in responses in List or Get operations.
+type ClusterProperties struct {
+	// READ-ONLY; The UTC time when the Event Hubs Cluster was created.
+	CreatedAt *string
+
+	// READ-ONLY; The metric ID of the cluster resource. Provided by the service and not modifiable by the user.
+	MetricID *string
+
+	// READ-ONLY; Status of the Cluster resource
+	Status *string
+
+	// READ-ONLY; The UTC time when the Event Hubs Cluster was last updated.
+	UpdatedAt *string
+}
+
+// ClusterQuotaConfigurationProperties - Contains all settings for the cluster.
+type ClusterQuotaConfigurationProperties struct {
+	// All possible Cluster settings - a collection of key/value paired settings which apply to quotas and configurations imposed
+	// on the cluster.
+	Settings map[string]*string
+}
+
+// ClusterSKU - SKU parameters particular to a cluster instance.
+type ClusterSKU struct {
+	// REQUIRED; Name of this SKU.
+	Name *ClusterSKUName
+
+	// The quantity of Event Hubs Cluster Capacity Units contained in this cluster.
+	Capacity *int32
+}
+
+// ConnectionState information.
+type ConnectionState struct {
+	// Description of the connection state.
+	Description *string
+
+	// Status of the connection.
+	Status *PrivateLinkConnectionStatus
+}
+
+// ConsumerGroup - Single item in List or Get Consumer group operation
+type ConsumerGroup struct {
+	// Single item in List or Get Consumer group operation
+	Properties *ConsumerGroupProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// ConsumerGroupListResult - The result of the List Consumer Group operation.
+type ConsumerGroupListResult struct {
+	// Link to the next set of results. Not empty if Value contains incomplete list of Consumer Group
+	NextLink *string
+
+	// Result of the List Consumer Group operation.
+	Value []*ConsumerGroup
+}
+
+// ConsumerGroupProperties - Single item in List or Get Consumer group operation
+type ConsumerGroupProperties struct {
+	// User Metadata is a placeholder to store user-defined string data with maximum length 1024. For example, it can be used
+	// to store descriptive data, such as a list of teams and their contact information;
+	// user-defined configuration settings can also be stored.
+	UserMetadata *string
+
+	// READ-ONLY; Exact time the consumer group was created.
+	CreatedAt *time.Time
+
+	// READ-ONLY; The exact time the consumer group was updated.
+ UpdatedAt *time.Time +} + +// Destination - Capture storage details for capture description +type Destination struct { + // Name for capture destination + Name *string + + // Properties describing the storage account, blob container and archive name format for capture destination + Properties *DestinationProperties +} + +// DestinationProperties - Properties describing the storage account, blob container and archive name format for capture destination +type DestinationProperties struct { + // Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. + // Here all the parameters (Namespace,EventHub .. etc) are mandatory + // irrespective of order + ArchiveNameFormat *string + + // Blob container Name + BlobContainer *string + + // The Azure Data Lake Store name for the captured events + DataLakeAccountName *string + + // The destination folder path for the captured events + DataLakeFolderPath *string + + // Subscription Id of Azure Data Lake Store + DataLakeSubscriptionID *string + + // Resource id of the storage account to be used to create the blobs + StorageAccountResourceID *string +} + +// EHNamespace - Single Namespace item in List or Get Operation +type EHNamespace struct { + // Properties of BYOK Identity description + Identity *Identity + + // Resource location. + Location *string + + // Namespace properties supplied for create namespace operation. + Properties *EHNamespaceProperties + + // Properties of sku resource + SKU *SKU + + // Resource tags. + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; The system meta data relating to this resource. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// EHNamespaceIDContainer - The full ARM ID of an Event Hubs Namespace +type EHNamespaceIDContainer struct { + // id parameter + ID *string +} + +// EHNamespaceIDListResult - The response of the List Namespace IDs operation +type EHNamespaceIDListResult struct { + // Result of the List Namespace IDs operation + Value []*EHNamespaceIDContainer +} + +// EHNamespaceListResult - The response of the List Namespace operation +type EHNamespaceListResult struct { + // Link to the next set of results. Not empty if Value contains incomplete list of namespaces. + NextLink *string + + // Result of the List Namespace operation + Value []*EHNamespace +} + +// EHNamespaceProperties - Namespace properties supplied for create namespace operation. +type EHNamespaceProperties struct { + // Alternate name specified when alias and namespace names are same. + AlternateName *string + + // Cluster ARM ID of the Namespace. + ClusterArmID *string + + // This property disables SAS authentication for the Event Hubs namespace. + DisableLocalAuth *bool + + // Properties of BYOK Encryption description + Encryption *Encryption + + // Value that indicates whether AutoInflate is enabled for eventhub namespace. + IsAutoInflateEnabled *bool + + // Value that indicates whether Kafka is enabled for eventhub namespace. + KafkaEnabled *bool + + // Upper limit of throughput units when AutoInflate is enabled, value should be within 0 to 20 throughput units. 
( '0' if + // AutoInflateEnabled = true) + MaximumThroughputUnits *int32 + + // List of private endpoint connections. + PrivateEndpointConnections []*PrivateEndpointConnection + + // Enabling this property creates a Standard Event Hubs Namespace in regions supported availability zones. + ZoneRedundant *bool + + // READ-ONLY; The time the Namespace was created. + CreatedAt *time.Time + + // READ-ONLY; Identifier for Azure Insights metrics. + MetricID *string + + // READ-ONLY; Provisioning state of the Namespace. + ProvisioningState *string + + // READ-ONLY; Endpoint you can use to perform Service Bus operations. + ServiceBusEndpoint *string + + // READ-ONLY; Status of the Namespace. + Status *string + + // READ-ONLY; The time the Namespace was updated. + UpdatedAt *time.Time +} + +// Encryption - Properties to configure Encryption +type Encryption struct { + // Enumerates the possible value of keySource for Encryption + KeySource *string + + // Properties of KeyVault + KeyVaultProperties []*KeyVaultProperties + + // Enable Infrastructure Encryption (Double Encryption) + RequireInfrastructureEncryption *bool +} + +// ErrorAdditionalInfo - The resource management error additional info. +type ErrorAdditionalInfo struct { + // READ-ONLY; The additional info. + Info any + + // READ-ONLY; The additional info type. + Type *string +} + +// ErrorDetail - The error detail. +type ErrorDetail struct { + // READ-ONLY; The error additional info. + AdditionalInfo []*ErrorAdditionalInfo + + // READ-ONLY; The error code. + Code *string + + // READ-ONLY; The error details. + Details []*ErrorDetail + + // READ-ONLY; The error message. + Message *string + + // READ-ONLY; The error target. + Target *string +} + +// ErrorResponse - Error response indicates Event Hub service is not able to process the incoming request. The reason is provided +// in the error message. +type ErrorResponse struct { + // The error object. + Error *ErrorDetail +} + +// Eventhub - Single item in List or Get Event Hub operation +type Eventhub struct { + // Properties supplied to the Create Or Update Event Hub operation. + Properties *Properties + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The geo-location where the resource lives + Location *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; The system meta data relating to this resource. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs" + Type *string +} + +// Identity - Properties to configure Identity for Bring your Own Keys +type Identity struct { + // Type of managed service identity. + Type *ManagedServiceIdentityType + + // Properties for User Assigned Identities + UserAssignedIdentities map[string]*UserAssignedIdentity + + // READ-ONLY; ObjectId from the KeyVault + PrincipalID *string + + // READ-ONLY; TenantId from the KeyVault + TenantID *string +} + +// KeyVaultProperties - Properties to configure keyVault Properties +type KeyVaultProperties struct { + Identity *UserAssignedIdentityProperties + + // Name of the Key from KeyVault + KeyName *string + + // Uri of KeyVault + KeyVaultURI *string + + // Key Version + KeyVersion *string +} + +// ListResult - The result of the List EventHubs operation. +type ListResult struct { + // Link to the next set of results. 
Not empty if Value contains incomplete list of EventHubs.
+	NextLink *string
+
+	// Result of the List EventHubs operation.
+	Value []*Eventhub
+}
+
+// NWRuleSetIPRules - A single IP filter rule within a NetworkRuleSet.
+type NWRuleSetIPRules struct {
+	// The IP Filter Action
+	Action *NetworkRuleIPAction
+
+	// IP Mask
+	IPMask *string
+}
+
+// NWRuleSetVirtualNetworkRules - A single virtual network rule within a NetworkRuleSet.
+type NWRuleSetVirtualNetworkRules struct {
+	// Value that indicates whether to ignore missing Vnet Service Endpoint
+	IgnoreMissingVnetServiceEndpoint *bool
+
+	// Subnet properties
+	Subnet *Subnet
+}
+
+// NetworkRuleSet - Description of the NetworkRuleSet resource.
+type NetworkRuleSet struct {
+	// NetworkRuleSet properties
+	Properties *NetworkRuleSetProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// NetworkRuleSetListResult - The response of the List NetworkRuleSet operation
+type NetworkRuleSetListResult struct {
+	// Link to the next set of results. Not empty if Value contains incomplete list of NetworkRuleSet.
+	NextLink *string
+
+	// Result of the List NetworkRuleSet operation
+	Value []*NetworkRuleSet
+}
+
+// NetworkRuleSetProperties - NetworkRuleSet properties
+type NetworkRuleSetProperties struct {
+	// Default Action for Network Rule Set
+	DefaultAction *DefaultAction
+
+	// List of IpRules
+	IPRules []*NWRuleSetIPRules
+
+	// This determines if traffic is allowed over public network. By default it is enabled.
+	PublicNetworkAccess *PublicNetworkAccessFlag
+
+	// Value that indicates whether Trusted Service Access is Enabled or not.
+	TrustedServiceAccessEnabled *bool
+
+	// List VirtualNetwork Rules
+	VirtualNetworkRules []*NWRuleSetVirtualNetworkRules
+}
+
+// Operation - An Event Hub REST API operation
+type Operation struct {
+	// Display of the operation
+	Display *OperationDisplay
+
+	// Indicates whether the operation is a data action
+	IsDataAction *bool
+
+	// Origin of the operation
+	Origin *string
+
+	// Properties of the operation
+	Properties any
+
+	// READ-ONLY; Operation name: {provider}/{resource}/{operation}
+	Name *string
+}
+
+// OperationDisplay - Operation display payload
+type OperationDisplay struct {
+	// READ-ONLY; Localized friendly description for the operation
+	Description *string
+
+	// READ-ONLY; Localized friendly name for the operation
+	Operation *string
+
+	// READ-ONLY; Resource provider of the operation
+	Provider *string
+
+	// READ-ONLY; Resource of the operation
+	Resource *string
+}
+
+// OperationListResult - Result of the request to list Event Hub operations. It contains a list of operations and a URL link
+// to get the next set of results.
+type OperationListResult struct {
+	// READ-ONLY; URL to get the next set of operation list results if there are any.
+	NextLink *string
+
+	// READ-ONLY; List of Event Hub operations supported by the Microsoft.EventHub resource provider.
+	Value []*Operation
+}
+
+// PrivateEndpoint information.
+type PrivateEndpoint struct {
+	// The ARM identifier for Private Endpoint.
+	ID *string
+}
+
+// PrivateEndpointConnection - Properties of the PrivateEndpointConnection.
+type PrivateEndpointConnection struct {
+	// Properties of the PrivateEndpointConnection.
+	Properties *PrivateEndpointConnectionProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// PrivateEndpointConnectionListResult - Result of the list of all private endpoint connections operation.
+type PrivateEndpointConnectionListResult struct {
+	// A link for the next page of private endpoint connection resources.
+	NextLink *string
+
+	// A collection of private endpoint connection resources.
+	Value []*PrivateEndpointConnection
+}
+
+// PrivateEndpointConnectionProperties - Properties of the private endpoint connection resource.
+type PrivateEndpointConnectionProperties struct {
+	// The Private Endpoint resource for this Connection.
+	PrivateEndpoint *PrivateEndpoint
+
+	// Details about the state of the connection.
+	PrivateLinkServiceConnectionState *ConnectionState
+
+	// Provisioning state of the Private Endpoint Connection.
+	ProvisioningState *EndPointProvisioningState
+}
+
+// PrivateLinkResource - Information of the private link resource.
+type PrivateLinkResource struct {
+	// Fully qualified identifier of the resource.
+	ID *string
+
+	// Name of the resource
+	Name *string
+
+	// Properties of the private link resource.
+	Properties *PrivateLinkResourceProperties
+
+	// Type of the resource
+	Type *string
+}
+
+// PrivateLinkResourceProperties - Properties of PrivateLinkResource
+type PrivateLinkResourceProperties struct {
+	// The private link resource group id.
+	GroupID *string
+
+	// The private link resource required member names.
+	RequiredMembers []*string
+
+	// The private link resource Private link DNS zone name.
+	RequiredZoneNames []*string
+}
+
+// PrivateLinkResourcesListResult - Result of the List private link resources operation.
+type PrivateLinkResourcesListResult struct {
+	// A link for the next page of private link resources.
+	NextLink *string
+
+	// A collection of private link resources
+	Value []*PrivateLinkResource
+}
+
+// Properties supplied to the Create Or Update Event Hub operation.
+type Properties struct {
+	// Properties of capture description
+	CaptureDescription *CaptureDescription
+
+	// Number of days to retain the events for this Event Hub, value should be 1 to 7 days
+	MessageRetentionInDays *int64
+
+	// Number of partitions created for the Event Hub, allowed values are from 1 to 32 partitions.
+	PartitionCount *int64
+
+	// Enumerates the possible values for the status of the Event Hub.
+	Status *EntityStatus
+
+	// READ-ONLY; Exact time the Event Hub was created.
+	CreatedAt *time.Time
+
+	// READ-ONLY; Identifiers of the partitions created for the Event Hub.
+	PartitionIDs []*string
+
+	// READ-ONLY; The exact time the Event Hub was updated.
+	UpdatedAt *time.Time
+}
+
+// ProxyResource - Common fields that are returned in the response for all Azure Resource Manager resources
+type ProxyResource struct {
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// RegenerateAccessKeyParameters - Parameters supplied to the Regenerate Authorization Rule operation, specifies which key
+// needs to be reset.
+type RegenerateAccessKeyParameters struct {
+	// REQUIRED; The access key to regenerate.
+	KeyType *KeyType
+
+	// Optional; if a key value is provided, it is set as the key of the given KeyType, otherwise an auto-generated key value
+	// is set.
+	Key *string
+}
+
+// Resource - Common fields that are returned in the response for all Azure Resource Manager resources
+type Resource struct {
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+	Type *string
+}
+
+// SKU parameters supplied to the create namespace operation
+type SKU struct {
+	// REQUIRED; Name of this SKU.
+	Name *SKUName
+
+	// The Event Hubs throughput units for Basic or Standard tiers, where value should be 0 to 20 throughput units. The Event
+	// Hubs premium units for Premium tier, where value should be 0 to 10 premium units.
+	Capacity *int32
+
+	// The billing tier of this particular SKU.
+	Tier *SKUTier
+}
+
+// SchemaGroup - Single item in List or Get Schema Group operation
+type SchemaGroup struct {
+	Properties *SchemaGroupProperties
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The geo-location where the resource lives
+	Location *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The system meta data relating to this resource.
+	SystemData *SystemData
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.EventHub/Namespaces" or "Microsoft.EventHub/Namespaces/EventHubs"
+	Type *string
+}
+
+// SchemaGroupListResult - The result of the List SchemaGroup operation.
+type SchemaGroupListResult struct {
+	// Link to the next set of results. Not empty if Value contains incomplete list of Schema Groups.
+	NextLink *string
+
+	// Result of the List SchemaGroups operation.
+	Value []*SchemaGroup
+}
+
+type SchemaGroupProperties struct {
+	// dictionary object for SchemaGroup group properties
+	GroupProperties map[string]*string
+	SchemaCompatibility *SchemaCompatibility
+	SchemaType *SchemaType
+
+	// READ-ONLY; Exact time the Schema Group was created.
+	CreatedAtUTC *time.Time
+
+	// READ-ONLY; The ETag value.
+	ETag *string
+
+	// READ-ONLY; Exact time the Schema Group was updated
+	UpdatedAtUTC *time.Time
+}
+
+// Subnet - Properties supplied for Subnet
+type Subnet struct {
+	// Resource ID of Virtual Network Subnet
+	ID *string
+}
+
+// SystemData - Metadata pertaining to creation and last modification of the resource.
+type SystemData struct {
+	// The timestamp of resource creation (UTC).
+	CreatedAt *time.Time
+
+	// The identity that created the resource.
+	CreatedBy *string
+
+	// The type of identity that created the resource.
+	CreatedByType *CreatedByType
+
+	// The timestamp of resource last modification (UTC).
+	LastModifiedAt *time.Time
+
+	// The identity that last modified the resource.
+	LastModifiedBy *string
+
+	// The type of identity that last modified the resource.
+	LastModifiedByType *CreatedByType
+}
+
+// TrackedResource - Definition of resource.
+type TrackedResource struct {
+	// Resource location.
+	Location *string
+
+	// Resource tags.
+	Tags map[string]*string
+
+	// READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
+	ID *string
+
+	// READ-ONLY; The name of the resource
+	Name *string
+
+	// READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
+	Type *string
+}
+
+// UserAssignedIdentity - Recognized Dictionary value.
+type UserAssignedIdentity struct {
+	// READ-ONLY; Client Id of user assigned identity
+	ClientID *string
+
+	// READ-ONLY; Principal Id of user assigned identity
+	PrincipalID *string
+}
+
+type UserAssignedIdentityProperties struct {
+	// ARM ID of user Identity selected for encryption
+	UserAssignedIdentity *string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models_serde.go
new file mode 100644
index 00000000000..e5b4c436bf3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/models_serde.go
@@ -0,0 +1,2442 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package armeventhub
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"reflect"
+)
+
+// MarshalJSON implements the json.Marshaller interface for type AccessKeys.
+func (a AccessKeys) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]any)
+	populate(objectMap, "aliasPrimaryConnectionString", a.AliasPrimaryConnectionString)
+	populate(objectMap, "aliasSecondaryConnectionString", a.AliasSecondaryConnectionString)
+	populate(objectMap, "keyName", a.KeyName)
+	populate(objectMap, "primaryConnectionString", a.PrimaryConnectionString)
+	populate(objectMap, "primaryKey", a.PrimaryKey)
+	populate(objectMap, "secondaryConnectionString", a.SecondaryConnectionString)
+	populate(objectMap, "secondaryKey", a.SecondaryKey)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type AccessKeys.
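+// Like the other generated unmarshallers in this file, it first decodes into a
+// map of raw JSON messages and then unpopulates each recognized key, so fields
+// added by newer API versions are ignored rather than treated as errors.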
+func (a *AccessKeys) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "aliasPrimaryConnectionString": + err = unpopulate(val, "AliasPrimaryConnectionString", &a.AliasPrimaryConnectionString) + delete(rawMsg, key) + case "aliasSecondaryConnectionString": + err = unpopulate(val, "AliasSecondaryConnectionString", &a.AliasSecondaryConnectionString) + delete(rawMsg, key) + case "keyName": + err = unpopulate(val, "KeyName", &a.KeyName) + delete(rawMsg, key) + case "primaryConnectionString": + err = unpopulate(val, "PrimaryConnectionString", &a.PrimaryConnectionString) + delete(rawMsg, key) + case "primaryKey": + err = unpopulate(val, "PrimaryKey", &a.PrimaryKey) + delete(rawMsg, key) + case "secondaryConnectionString": + err = unpopulate(val, "SecondaryConnectionString", &a.SecondaryConnectionString) + delete(rawMsg, key) + case "secondaryKey": + err = unpopulate(val, "SecondaryKey", &a.SecondaryKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ArmDisasterRecovery. +func (a ArmDisasterRecovery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", a.ID) + populate(objectMap, "location", a.Location) + populate(objectMap, "name", a.Name) + populate(objectMap, "properties", a.Properties) + populate(objectMap, "systemData", a.SystemData) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ArmDisasterRecovery. +func (a *ArmDisasterRecovery) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &a.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &a.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &a.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &a.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &a.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ArmDisasterRecoveryListResult. +func (a ArmDisasterRecoveryListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", a.NextLink) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ArmDisasterRecoveryListResult. 
+func (a *ArmDisasterRecoveryListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &a.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ArmDisasterRecoveryProperties. +func (a ArmDisasterRecoveryProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alternateName", a.AlternateName) + populate(objectMap, "partnerNamespace", a.PartnerNamespace) + populate(objectMap, "pendingReplicationOperationsCount", a.PendingReplicationOperationsCount) + populate(objectMap, "provisioningState", a.ProvisioningState) + populate(objectMap, "role", a.Role) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ArmDisasterRecoveryProperties. +func (a *ArmDisasterRecoveryProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alternateName": + err = unpopulate(val, "AlternateName", &a.AlternateName) + delete(rawMsg, key) + case "partnerNamespace": + err = unpopulate(val, "PartnerNamespace", &a.PartnerNamespace) + delete(rawMsg, key) + case "pendingReplicationOperationsCount": + err = unpopulate(val, "PendingReplicationOperationsCount", &a.PendingReplicationOperationsCount) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &a.ProvisioningState) + delete(rawMsg, key) + case "role": + err = unpopulate(val, "Role", &a.Role) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AuthorizationRule. +func (a AuthorizationRule) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", a.ID) + populate(objectMap, "location", a.Location) + populate(objectMap, "name", a.Name) + populate(objectMap, "properties", a.Properties) + populate(objectMap, "systemData", a.SystemData) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AuthorizationRule. 
+func (a *AuthorizationRule) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &a.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &a.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &a.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &a.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &a.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AuthorizationRuleListResult. +func (a AuthorizationRuleListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", a.NextLink) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AuthorizationRuleListResult. +func (a *AuthorizationRuleListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &a.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AuthorizationRuleProperties. +func (a AuthorizationRuleProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "rights", a.Rights) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AuthorizationRuleProperties. +func (a *AuthorizationRuleProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "rights": + err = unpopulate(val, "Rights", &a.Rights) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AvailableCluster. +func (a AvailableCluster) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "location", a.Location) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AvailableCluster. 
+func (a *AvailableCluster) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "location": + err = unpopulate(val, "Location", &a.Location) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AvailableClustersList. +func (a AvailableClustersList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AvailableClustersList. +func (a *AvailableClustersList) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CaptureDescription. +func (c CaptureDescription) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "destination", c.Destination) + populate(objectMap, "enabled", c.Enabled) + populate(objectMap, "encoding", c.Encoding) + populate(objectMap, "intervalInSeconds", c.IntervalInSeconds) + populate(objectMap, "sizeLimitInBytes", c.SizeLimitInBytes) + populate(objectMap, "skipEmptyArchives", c.SkipEmptyArchives) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CaptureDescription. +func (c *CaptureDescription) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "destination": + err = unpopulate(val, "Destination", &c.Destination) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &c.Enabled) + delete(rawMsg, key) + case "encoding": + err = unpopulate(val, "Encoding", &c.Encoding) + delete(rawMsg, key) + case "intervalInSeconds": + err = unpopulate(val, "IntervalInSeconds", &c.IntervalInSeconds) + delete(rawMsg, key) + case "sizeLimitInBytes": + err = unpopulate(val, "SizeLimitInBytes", &c.SizeLimitInBytes) + delete(rawMsg, key) + case "skipEmptyArchives": + err = unpopulate(val, "SkipEmptyArchives", &c.SkipEmptyArchives) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CheckNameAvailabilityParameter. +func (c CheckNameAvailabilityParameter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "name", c.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CheckNameAvailabilityParameter. 
+func (c *CheckNameAvailabilityParameter) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CheckNameAvailabilityResult. +func (c CheckNameAvailabilityResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "message", c.Message) + populate(objectMap, "nameAvailable", c.NameAvailable) + populate(objectMap, "reason", c.Reason) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CheckNameAvailabilityResult. +func (c *CheckNameAvailabilityResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "message": + err = unpopulate(val, "Message", &c.Message) + delete(rawMsg, key) + case "nameAvailable": + err = unpopulate(val, "NameAvailable", &c.NameAvailable) + delete(rawMsg, key) + case "reason": + err = unpopulate(val, "Reason", &c.Reason) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Cluster. +func (c Cluster) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "location", c.Location) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "sku", c.SKU) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "tags", c.Tags) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Cluster. +func (c *Cluster) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &c.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &c.SKU) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &c.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterListResult. 
+func (c ClusterListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterListResult. +func (c *ClusterListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterProperties. +func (c ClusterProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "createdAt", c.CreatedAt) + populate(objectMap, "metricId", c.MetricID) + populate(objectMap, "status", c.Status) + populate(objectMap, "updatedAt", c.UpdatedAt) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterProperties. +func (c *ClusterProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAt": + err = unpopulate(val, "CreatedAt", &c.CreatedAt) + delete(rawMsg, key) + case "metricId": + err = unpopulate(val, "MetricID", &c.MetricID) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &c.Status) + delete(rawMsg, key) + case "updatedAt": + err = unpopulate(val, "UpdatedAt", &c.UpdatedAt) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterQuotaConfigurationProperties. +func (c ClusterQuotaConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "settings", c.Settings) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterQuotaConfigurationProperties. +func (c *ClusterQuotaConfigurationProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "settings": + err = unpopulate(val, "Settings", &c.Settings) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterSKU. +func (c ClusterSKU) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "capacity", c.Capacity) + populate(objectMap, "name", c.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterSKU. 
+func (c *ClusterSKU) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "capacity": + err = unpopulate(val, "Capacity", &c.Capacity) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ConnectionState. +func (c ConnectionState) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", c.Description) + populate(objectMap, "status", c.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ConnectionState. +func (c *ConnectionState) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &c.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ConsumerGroup. +func (c ConsumerGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "location", c.Location) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ConsumerGroup. +func (c *ConsumerGroup) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &c.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ConsumerGroupListResult. +func (c ConsumerGroupListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ConsumerGroupListResult. 
+func (c *ConsumerGroupListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ConsumerGroupProperties. +func (c ConsumerGroupProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateDateTimeRFC3339(objectMap, "createdAt", c.CreatedAt) + populateDateTimeRFC3339(objectMap, "updatedAt", c.UpdatedAt) + populate(objectMap, "userMetadata", c.UserMetadata) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ConsumerGroupProperties. +func (c *ConsumerGroupProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAt": + err = unpopulateDateTimeRFC3339(val, "CreatedAt", &c.CreatedAt) + delete(rawMsg, key) + case "updatedAt": + err = unpopulateDateTimeRFC3339(val, "UpdatedAt", &c.UpdatedAt) + delete(rawMsg, key) + case "userMetadata": + err = unpopulate(val, "UserMetadata", &c.UserMetadata) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Destination. +func (d Destination) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Destination. +func (d *Destination) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "name": + err = unpopulate(val, "Name", &d.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &d.Properties) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DestinationProperties. +func (d DestinationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "archiveNameFormat", d.ArchiveNameFormat) + populate(objectMap, "blobContainer", d.BlobContainer) + populate(objectMap, "dataLakeAccountName", d.DataLakeAccountName) + populate(objectMap, "dataLakeFolderPath", d.DataLakeFolderPath) + populate(objectMap, "dataLakeSubscriptionId", d.DataLakeSubscriptionID) + populate(objectMap, "storageAccountResourceId", d.StorageAccountResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DestinationProperties. 
+func (d *DestinationProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "archiveNameFormat": + err = unpopulate(val, "ArchiveNameFormat", &d.ArchiveNameFormat) + delete(rawMsg, key) + case "blobContainer": + err = unpopulate(val, "BlobContainer", &d.BlobContainer) + delete(rawMsg, key) + case "dataLakeAccountName": + err = unpopulate(val, "DataLakeAccountName", &d.DataLakeAccountName) + delete(rawMsg, key) + case "dataLakeFolderPath": + err = unpopulate(val, "DataLakeFolderPath", &d.DataLakeFolderPath) + delete(rawMsg, key) + case "dataLakeSubscriptionId": + err = unpopulate(val, "DataLakeSubscriptionID", &d.DataLakeSubscriptionID) + delete(rawMsg, key) + case "storageAccountResourceId": + err = unpopulate(val, "StorageAccountResourceID", &d.StorageAccountResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EHNamespace. +func (e EHNamespace) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", e.ID) + populate(objectMap, "identity", e.Identity) + populate(objectMap, "location", e.Location) + populate(objectMap, "name", e.Name) + populate(objectMap, "properties", e.Properties) + populate(objectMap, "sku", e.SKU) + populate(objectMap, "systemData", e.SystemData) + populate(objectMap, "tags", e.Tags) + populate(objectMap, "type", e.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EHNamespace. +func (e *EHNamespace) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &e.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &e.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &e.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &e.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &e.Properties) + delete(rawMsg, key) + case "sku": + err = unpopulate(val, "SKU", &e.SKU) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &e.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &e.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &e.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EHNamespaceIDContainer. +func (e EHNamespaceIDContainer) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", e.ID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EHNamespaceIDContainer. 
+func (e *EHNamespaceIDContainer) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &e.ID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EHNamespaceIDListResult. +func (e EHNamespaceIDListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "value", e.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EHNamespaceIDListResult. +func (e *EHNamespaceIDListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = unpopulate(val, "Value", &e.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EHNamespaceListResult. +func (e EHNamespaceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", e.NextLink) + populate(objectMap, "value", e.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EHNamespaceListResult. +func (e *EHNamespaceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &e.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &e.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EHNamespaceProperties. +func (e EHNamespaceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "alternateName", e.AlternateName) + populate(objectMap, "clusterArmId", e.ClusterArmID) + populateDateTimeRFC3339(objectMap, "createdAt", e.CreatedAt) + populate(objectMap, "disableLocalAuth", e.DisableLocalAuth) + populate(objectMap, "encryption", e.Encryption) + populate(objectMap, "isAutoInflateEnabled", e.IsAutoInflateEnabled) + populate(objectMap, "kafkaEnabled", e.KafkaEnabled) + populate(objectMap, "maximumThroughputUnits", e.MaximumThroughputUnits) + populate(objectMap, "metricId", e.MetricID) + populate(objectMap, "privateEndpointConnections", e.PrivateEndpointConnections) + populate(objectMap, "provisioningState", e.ProvisioningState) + populate(objectMap, "serviceBusEndpoint", e.ServiceBusEndpoint) + populate(objectMap, "status", e.Status) + populateDateTimeRFC3339(objectMap, "updatedAt", e.UpdatedAt) + populate(objectMap, "zoneRedundant", e.ZoneRedundant) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EHNamespaceProperties. 
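+// CreatedAt and UpdatedAt travel as RFC3339 strings on the wire, so they are
+// parsed through the unpopulateDateTimeRFC3339 wrapper (defined alongside
+// populateDateTimeRFC3339 in this package's time_rfc3339.go) rather than the
+// generic unpopulate helper.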
+func (e *EHNamespaceProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "alternateName": + err = unpopulate(val, "AlternateName", &e.AlternateName) + delete(rawMsg, key) + case "clusterArmId": + err = unpopulate(val, "ClusterArmID", &e.ClusterArmID) + delete(rawMsg, key) + case "createdAt": + err = unpopulateDateTimeRFC3339(val, "CreatedAt", &e.CreatedAt) + delete(rawMsg, key) + case "disableLocalAuth": + err = unpopulate(val, "DisableLocalAuth", &e.DisableLocalAuth) + delete(rawMsg, key) + case "encryption": + err = unpopulate(val, "Encryption", &e.Encryption) + delete(rawMsg, key) + case "isAutoInflateEnabled": + err = unpopulate(val, "IsAutoInflateEnabled", &e.IsAutoInflateEnabled) + delete(rawMsg, key) + case "kafkaEnabled": + err = unpopulate(val, "KafkaEnabled", &e.KafkaEnabled) + delete(rawMsg, key) + case "maximumThroughputUnits": + err = unpopulate(val, "MaximumThroughputUnits", &e.MaximumThroughputUnits) + delete(rawMsg, key) + case "metricId": + err = unpopulate(val, "MetricID", &e.MetricID) + delete(rawMsg, key) + case "privateEndpointConnections": + err = unpopulate(val, "PrivateEndpointConnections", &e.PrivateEndpointConnections) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &e.ProvisioningState) + delete(rawMsg, key) + case "serviceBusEndpoint": + err = unpopulate(val, "ServiceBusEndpoint", &e.ServiceBusEndpoint) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &e.Status) + delete(rawMsg, key) + case "updatedAt": + err = unpopulateDateTimeRFC3339(val, "UpdatedAt", &e.UpdatedAt) + delete(rawMsg, key) + case "zoneRedundant": + err = unpopulate(val, "ZoneRedundant", &e.ZoneRedundant) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Encryption. +func (e Encryption) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["keySource"] = "Microsoft.KeyVault" + populate(objectMap, "keyVaultProperties", e.KeyVaultProperties) + populate(objectMap, "requireInfrastructureEncryption", e.RequireInfrastructureEncryption) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Encryption. +func (e *Encryption) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "keySource": + err = unpopulate(val, "KeySource", &e.KeySource) + delete(rawMsg, key) + case "keyVaultProperties": + err = unpopulate(val, "KeyVaultProperties", &e.KeyVaultProperties) + delete(rawMsg, key) + case "requireInfrastructureEncryption": + err = unpopulate(val, "RequireInfrastructureEncryption", &e.RequireInfrastructureEncryption) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ErrorAdditionalInfo. 
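+// Info is declared as `any`, so it is written through populateAny, which,
+// unlike populate, performs no reflection-based nil-pointer check; it only
+// drops untyped nil values and honors azcore.NullValue sentinels.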
+func (e ErrorAdditionalInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateAny(objectMap, "info", e.Info) + populate(objectMap, "type", e.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ErrorAdditionalInfo. +func (e *ErrorAdditionalInfo) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "info": + err = unpopulate(val, "Info", &e.Info) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &e.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ErrorDetail. +func (e ErrorDetail) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "additionalInfo", e.AdditionalInfo) + populate(objectMap, "code", e.Code) + populate(objectMap, "details", e.Details) + populate(objectMap, "message", e.Message) + populate(objectMap, "target", e.Target) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ErrorDetail. +func (e *ErrorDetail) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "additionalInfo": + err = unpopulate(val, "AdditionalInfo", &e.AdditionalInfo) + delete(rawMsg, key) + case "code": + err = unpopulate(val, "Code", &e.Code) + delete(rawMsg, key) + case "details": + err = unpopulate(val, "Details", &e.Details) + delete(rawMsg, key) + case "message": + err = unpopulate(val, "Message", &e.Message) + delete(rawMsg, key) + case "target": + err = unpopulate(val, "Target", &e.Target) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ErrorResponse. +func (e ErrorResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "error", e.Error) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ErrorResponse. +func (e *ErrorResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "error": + err = unpopulate(val, "Error", &e.Error) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Eventhub. +func (e Eventhub) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", e.ID) + populate(objectMap, "location", e.Location) + populate(objectMap, "name", e.Name) + populate(objectMap, "properties", e.Properties) + populate(objectMap, "systemData", e.SystemData) + populate(objectMap, "type", e.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Eventhub. 
+func (e *Eventhub) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &e.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &e.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &e.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &e.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &e.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &e.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Identity. +func (i Identity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "principalId", i.PrincipalID) + populate(objectMap, "tenantId", i.TenantID) + populate(objectMap, "type", i.Type) + populate(objectMap, "userAssignedIdentities", i.UserAssignedIdentities) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Identity. +func (i *Identity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "principalId": + err = unpopulate(val, "PrincipalID", &i.PrincipalID) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &i.TenantID) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &i.Type) + delete(rawMsg, key) + case "userAssignedIdentities": + err = unpopulate(val, "UserAssignedIdentities", &i.UserAssignedIdentities) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KeyVaultProperties. +func (k KeyVaultProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identity", k.Identity) + populate(objectMap, "keyName", k.KeyName) + populate(objectMap, "keyVaultUri", k.KeyVaultURI) + populate(objectMap, "keyVersion", k.KeyVersion) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KeyVaultProperties. +func (k *KeyVaultProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identity": + err = unpopulate(val, "Identity", &k.Identity) + delete(rawMsg, key) + case "keyName": + err = unpopulate(val, "KeyName", &k.KeyName) + delete(rawMsg, key) + case "keyVaultUri": + err = unpopulate(val, "KeyVaultURI", &k.KeyVaultURI) + delete(rawMsg, key) + case "keyVersion": + err = unpopulate(val, "KeyVersion", &k.KeyVersion) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListResult. 
+func (l ListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListResult. +func (l *ListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NWRuleSetIPRules. +func (n NWRuleSetIPRules) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "action", n.Action) + populate(objectMap, "ipMask", n.IPMask) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NWRuleSetIPRules. +func (n *NWRuleSetIPRules) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "action": + err = unpopulate(val, "Action", &n.Action) + delete(rawMsg, key) + case "ipMask": + err = unpopulate(val, "IPMask", &n.IPMask) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NWRuleSetVirtualNetworkRules. +func (n NWRuleSetVirtualNetworkRules) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "ignoreMissingVnetServiceEndpoint", n.IgnoreMissingVnetServiceEndpoint) + populate(objectMap, "subnet", n.Subnet) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NWRuleSetVirtualNetworkRules. +func (n *NWRuleSetVirtualNetworkRules) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "ignoreMissingVnetServiceEndpoint": + err = unpopulate(val, "IgnoreMissingVnetServiceEndpoint", &n.IgnoreMissingVnetServiceEndpoint) + delete(rawMsg, key) + case "subnet": + err = unpopulate(val, "Subnet", &n.Subnet) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NetworkRuleSet. +func (n NetworkRuleSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", n.ID) + populate(objectMap, "location", n.Location) + populate(objectMap, "name", n.Name) + populate(objectMap, "properties", n.Properties) + populate(objectMap, "systemData", n.SystemData) + populate(objectMap, "type", n.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NetworkRuleSet. 
+func (n *NetworkRuleSet) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &n.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &n.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &n.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &n.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &n.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &n.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NetworkRuleSetListResult. +func (n NetworkRuleSetListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", n.NextLink) + populate(objectMap, "value", n.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NetworkRuleSetListResult. +func (n *NetworkRuleSetListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &n.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &n.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NetworkRuleSetProperties. +func (n NetworkRuleSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "defaultAction", n.DefaultAction) + populate(objectMap, "ipRules", n.IPRules) + populate(objectMap, "publicNetworkAccess", n.PublicNetworkAccess) + populate(objectMap, "trustedServiceAccessEnabled", n.TrustedServiceAccessEnabled) + populate(objectMap, "virtualNetworkRules", n.VirtualNetworkRules) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NetworkRuleSetProperties. 
+func (n *NetworkRuleSetProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "defaultAction": + err = unpopulate(val, "DefaultAction", &n.DefaultAction) + delete(rawMsg, key) + case "ipRules": + err = unpopulate(val, "IPRules", &n.IPRules) + delete(rawMsg, key) + case "publicNetworkAccess": + err = unpopulate(val, "PublicNetworkAccess", &n.PublicNetworkAccess) + delete(rawMsg, key) + case "trustedServiceAccessEnabled": + err = unpopulate(val, "TrustedServiceAccessEnabled", &n.TrustedServiceAccessEnabled) + delete(rawMsg, key) + case "virtualNetworkRules": + err = unpopulate(val, "VirtualNetworkRules", &n.VirtualNetworkRules) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Operation. +func (o Operation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "display", o.Display) + populate(objectMap, "isDataAction", o.IsDataAction) + populate(objectMap, "name", o.Name) + populate(objectMap, "origin", o.Origin) + populateAny(objectMap, "properties", o.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Operation. +func (o *Operation) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "display": + err = unpopulate(val, "Display", &o.Display) + delete(rawMsg, key) + case "isDataAction": + err = unpopulate(val, "IsDataAction", &o.IsDataAction) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &o.Name) + delete(rawMsg, key) + case "origin": + err = unpopulate(val, "Origin", &o.Origin) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &o.Properties) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type OperationDisplay. +func (o OperationDisplay) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", o.Description) + populate(objectMap, "operation", o.Operation) + populate(objectMap, "provider", o.Provider) + populate(objectMap, "resource", o.Resource) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type OperationDisplay. 
+func (o *OperationDisplay) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &o.Description) + delete(rawMsg, key) + case "operation": + err = unpopulate(val, "Operation", &o.Operation) + delete(rawMsg, key) + case "provider": + err = unpopulate(val, "Provider", &o.Provider) + delete(rawMsg, key) + case "resource": + err = unpopulate(val, "Resource", &o.Resource) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type OperationListResult. +func (o OperationListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", o.NextLink) + populate(objectMap, "value", o.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type OperationListResult. +func (o *OperationListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &o.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &o.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpoint. +func (p PrivateEndpoint) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpoint. +func (p *PrivateEndpoint) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnection. +func (p PrivateEndpointConnection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "location", p.Location) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnection. 
+func (p *PrivateEndpointConnection) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &p.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionListResult. +func (p PrivateEndpointConnectionListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionListResult. +func (p *PrivateEndpointConnectionListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateEndpointConnectionProperties. +func (p PrivateEndpointConnectionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "privateEndpoint", p.PrivateEndpoint) + populate(objectMap, "privateLinkServiceConnectionState", p.PrivateLinkServiceConnectionState) + populate(objectMap, "provisioningState", p.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateEndpointConnectionProperties. +func (p *PrivateEndpointConnectionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "privateEndpoint": + err = unpopulate(val, "PrivateEndpoint", &p.PrivateEndpoint) + delete(rawMsg, key) + case "privateLinkServiceConnectionState": + err = unpopulate(val, "PrivateLinkServiceConnectionState", &p.PrivateLinkServiceConnectionState) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &p.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResource. 
+func (p PrivateLinkResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResource. +func (p *PrivateLinkResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourceProperties. +func (p PrivateLinkResourceProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "groupId", p.GroupID) + populate(objectMap, "requiredMembers", p.RequiredMembers) + populate(objectMap, "requiredZoneNames", p.RequiredZoneNames) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourceProperties. +func (p *PrivateLinkResourceProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "groupId": + err = unpopulate(val, "GroupID", &p.GroupID) + delete(rawMsg, key) + case "requiredMembers": + err = unpopulate(val, "RequiredMembers", &p.RequiredMembers) + delete(rawMsg, key) + case "requiredZoneNames": + err = unpopulate(val, "RequiredZoneNames", &p.RequiredZoneNames) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrivateLinkResourcesListResult. +func (p PrivateLinkResourcesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrivateLinkResourcesListResult. +func (p *PrivateLinkResourcesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Properties. 
+func (p Properties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "captureDescription", p.CaptureDescription) + populateDateTimeRFC3339(objectMap, "createdAt", p.CreatedAt) + populate(objectMap, "messageRetentionInDays", p.MessageRetentionInDays) + populate(objectMap, "partitionCount", p.PartitionCount) + populate(objectMap, "partitionIds", p.PartitionIDs) + populate(objectMap, "status", p.Status) + populateDateTimeRFC3339(objectMap, "updatedAt", p.UpdatedAt) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Properties. +func (p *Properties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "captureDescription": + err = unpopulate(val, "CaptureDescription", &p.CaptureDescription) + delete(rawMsg, key) + case "createdAt": + err = unpopulateDateTimeRFC3339(val, "CreatedAt", &p.CreatedAt) + delete(rawMsg, key) + case "messageRetentionInDays": + err = unpopulate(val, "MessageRetentionInDays", &p.MessageRetentionInDays) + delete(rawMsg, key) + case "partitionCount": + err = unpopulate(val, "PartitionCount", &p.PartitionCount) + delete(rawMsg, key) + case "partitionIds": + err = unpopulate(val, "PartitionIDs", &p.PartitionIDs) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &p.Status) + delete(rawMsg, key) + case "updatedAt": + err = unpopulateDateTimeRFC3339(val, "UpdatedAt", &p.UpdatedAt) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ProxyResource. +func (p ProxyResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "location", p.Location) + populate(objectMap, "name", p.Name) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ProxyResource. +func (p *ProxyResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &p.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RegenerateAccessKeyParameters. +func (r RegenerateAccessKeyParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "key", r.Key) + populate(objectMap, "keyType", r.KeyType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RegenerateAccessKeyParameters. 
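+
+// Timestamp fields such as Properties.CreatedAt go through the
+// populateDateTimeRFC3339/unpopulateDateTimeRFC3339 helpers, which wrap
+// time.Time so both marshalling and parsing consistently use RFC 3339 on the
+// wire. A short sketch; the literal value is only illustrative:
+//
+//	created := time.Date(2024, 7, 1, 12, 0, 0, 0, time.UTC)
+//	props := Properties{CreatedAt: &created}
+//	raw, _ := json.Marshal(props) // ..."createdAt":"2024-07-01T12:00:00Z"...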
+func (r *RegenerateAccessKeyParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "key": + err = unpopulate(val, "Key", &r.Key) + delete(rawMsg, key) + case "keyType": + err = unpopulate(val, "KeyType", &r.KeyType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", r.ID) + populate(objectMap, "name", r.Name) + populate(objectMap, "type", r.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Resource. +func (r *Resource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &r.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &r.Name) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &r.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SKU. +func (s SKU) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "capacity", s.Capacity) + populate(objectMap, "name", s.Name) + populate(objectMap, "tier", s.Tier) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SKU. +func (s *SKU) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "capacity": + err = unpopulate(val, "Capacity", &s.Capacity) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &s.Name) + delete(rawMsg, key) + case "tier": + err = unpopulate(val, "Tier", &s.Tier) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SchemaGroup. +func (s SchemaGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", s.ID) + populate(objectMap, "location", s.Location) + populate(objectMap, "name", s.Name) + populate(objectMap, "properties", s.Properties) + populate(objectMap, "systemData", s.SystemData) + populate(objectMap, "type", s.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SchemaGroup. 
+func (s *SchemaGroup) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &s.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &s.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &s.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &s.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &s.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SchemaGroupListResult. +func (s SchemaGroupListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SchemaGroupListResult. +func (s *SchemaGroupListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SchemaGroupProperties. +func (s SchemaGroupProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateDateTimeRFC3339(objectMap, "createdAtUtc", s.CreatedAtUTC) + populate(objectMap, "eTag", s.ETag) + populate(objectMap, "groupProperties", s.GroupProperties) + populate(objectMap, "schemaCompatibility", s.SchemaCompatibility) + populate(objectMap, "schemaType", s.SchemaType) + populateDateTimeRFC3339(objectMap, "updatedAtUtc", s.UpdatedAtUTC) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SchemaGroupProperties. 
+func (s *SchemaGroupProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAtUtc": + err = unpopulateDateTimeRFC3339(val, "CreatedAtUTC", &s.CreatedAtUTC) + delete(rawMsg, key) + case "eTag": + err = unpopulate(val, "ETag", &s.ETag) + delete(rawMsg, key) + case "groupProperties": + err = unpopulate(val, "GroupProperties", &s.GroupProperties) + delete(rawMsg, key) + case "schemaCompatibility": + err = unpopulate(val, "SchemaCompatibility", &s.SchemaCompatibility) + delete(rawMsg, key) + case "schemaType": + err = unpopulate(val, "SchemaType", &s.SchemaType) + delete(rawMsg, key) + case "updatedAtUtc": + err = unpopulateDateTimeRFC3339(val, "UpdatedAtUTC", &s.UpdatedAtUTC) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Subnet. +func (s Subnet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", s.ID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Subnet. +func (s *Subnet) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemData. +func (s SystemData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateDateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) + populate(objectMap, "createdBy", s.CreatedBy) + populate(objectMap, "createdByType", s.CreatedByType) + populateDateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) + populate(objectMap, "lastModifiedBy", s.LastModifiedBy) + populate(objectMap, "lastModifiedByType", s.LastModifiedByType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemData. 
+func (s *SystemData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAt": + err = unpopulateDateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) + delete(rawMsg, key) + case "createdBy": + err = unpopulate(val, "CreatedBy", &s.CreatedBy) + delete(rawMsg, key) + case "createdByType": + err = unpopulate(val, "CreatedByType", &s.CreatedByType) + delete(rawMsg, key) + case "lastModifiedAt": + err = unpopulateDateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) + delete(rawMsg, key) + case "lastModifiedBy": + err = unpopulate(val, "LastModifiedBy", &s.LastModifiedBy) + delete(rawMsg, key) + case "lastModifiedByType": + err = unpopulate(val, "LastModifiedByType", &s.LastModifiedByType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type TrackedResource. +func (t TrackedResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", t.ID) + populate(objectMap, "location", t.Location) + populate(objectMap, "name", t.Name) + populate(objectMap, "tags", t.Tags) + populate(objectMap, "type", t.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TrackedResource. +func (t *TrackedResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &t.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &t.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &t.Name) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &t.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &t.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserAssignedIdentity. +func (u UserAssignedIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "clientId", u.ClientID) + populate(objectMap, "principalId", u.PrincipalID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserAssignedIdentity. +func (u *UserAssignedIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "clientId": + err = unpopulate(val, "ClientID", &u.ClientID) + delete(rawMsg, key) + case "principalId": + err = unpopulate(val, "PrincipalID", &u.PrincipalID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UserAssignedIdentityProperties. 
+func (u UserAssignedIdentityProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "userAssignedIdentity", u.UserAssignedIdentity) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UserAssignedIdentityProperties. +func (u *UserAssignedIdentityProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "userAssignedIdentity": + err = unpopulate(val, "UserAssignedIdentity", &u.UserAssignedIdentity) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/namespaces_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/namespaces_client.go new file mode 100644 index 00000000000..7c9b44ee191 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/namespaces_client.go @@ -0,0 +1,1119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// NamespacesClient contains the methods for the Namespaces group. +// Don't use this type directly, use NewNamespacesClient() instead. +type NamespacesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewNamespacesClient creates a new instance of NamespacesClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
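+//
+// A minimal construction sketch (the azidentity credential is an assumption;
+// any azcore.TokenCredential works):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := armeventhub.NewNamespacesClient("<subscription-id>", cred, nil)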
+func NewNamespacesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*NamespacesClient, error) {
+	cl, err := arm.NewClient(moduleName, moduleVersion, credential, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &NamespacesClient{
+		subscriptionID: subscriptionID,
+		internal:       cl,
+	}
+	return client, nil
+}
+
+// CheckNameAvailability - Check the given Namespace name availability.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+// - parameters - Parameters to check availability of the given Namespace name
+// - options - NamespacesClientCheckNameAvailabilityOptions contains the optional parameters for the NamespacesClient.CheckNameAvailability
+// method.
+func (client *NamespacesClient) CheckNameAvailability(ctx context.Context, parameters CheckNameAvailabilityParameter, options *NamespacesClientCheckNameAvailabilityOptions) (NamespacesClientCheckNameAvailabilityResponse, error) {
+	var err error
+	const operationName = "NamespacesClient.CheckNameAvailability"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	req, err := client.checkNameAvailabilityCreateRequest(ctx, parameters, options)
+	if err != nil {
+		return NamespacesClientCheckNameAvailabilityResponse{}, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return NamespacesClientCheckNameAvailabilityResponse{}, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return NamespacesClientCheckNameAvailabilityResponse{}, err
+	}
+	resp, err := client.checkNameAvailabilityHandleResponse(httpResp)
+	return resp, err
+}
+
+// checkNameAvailabilityCreateRequest creates the CheckNameAvailability request.
+func (client *NamespacesClient) checkNameAvailabilityCreateRequest(ctx context.Context, parameters CheckNameAvailabilityParameter, options *NamespacesClientCheckNameAvailabilityOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/checkNameAvailability"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2021-11-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	if err := runtime.MarshalAsJSON(req, parameters); err != nil {
+		return nil, err
+	}
+	return req, nil
+}
+
+// checkNameAvailabilityHandleResponse handles the CheckNameAvailability response.
+func (client *NamespacesClient) checkNameAvailabilityHandleResponse(resp *http.Response) (NamespacesClientCheckNameAvailabilityResponse, error) {
+	result := NamespacesClientCheckNameAvailabilityResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.CheckNameAvailabilityResult); err != nil {
+		return NamespacesClientCheckNameAvailabilityResponse{}, err
+	}
+	return result, nil
+}
+
+// BeginCreateOrUpdate - Creates or updates a namespace. Once created, this namespace's resource manifest is immutable. This
+// operation is idempotent.
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - parameters - Parameters for creating a namespace resource. +// - options - NamespacesClientBeginCreateOrUpdateOptions contains the optional parameters for the NamespacesClient.BeginCreateOrUpdate +// method. +func (client *NamespacesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace, options *NamespacesClientBeginCreateOrUpdateOptions) (*runtime.Poller[NamespacesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, namespaceName, parameters, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NamespacesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NamespacesClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// CreateOrUpdate - Creates or updates a namespace. Once created, this namespace's resource manifest is immutable. This operation +// is idempotent. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +func (client *NamespacesClient) createOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace, options *NamespacesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + var err error + const operationName = "NamespacesClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, parameters, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
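+
+// BeginCreateOrUpdate above follows the azcore long-running-operation pattern:
+// it returns a runtime.Poller, and PollUntilDone blocks until the namespace
+// reaches a terminal state. A sketch with placeholder names ("rg", "ns", and
+// "westus" are assumptions):
+//
+//	poller, err := client.BeginCreateOrUpdate(ctx, "rg", "ns", armeventhub.EHNamespace{
+//		Location: to.Ptr("westus"),
+//	}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)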
+func (client *NamespacesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace, options *NamespacesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// CreateOrUpdateAuthorizationRule - Creates or updates an AuthorizationRule for a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - authorizationRuleName - The authorization rule name. +// - parameters - The shared access AuthorizationRule. +// - options - NamespacesClientCreateOrUpdateAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.CreateOrUpdateAuthorizationRule +// method. +func (client *NamespacesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters AuthorizationRule, options *NamespacesClientCreateOrUpdateAuthorizationRuleOptions) (NamespacesClientCreateOrUpdateAuthorizationRuleResponse, error) { + var err error + const operationName = "NamespacesClient.CreateOrUpdateAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, authorizationRuleName, parameters, options) + if err != nil { + return NamespacesClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + resp, err := client.createOrUpdateAuthorizationRuleHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateAuthorizationRuleCreateRequest creates the CreateOrUpdateAuthorizationRule request. 
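+
+// A sketch of CreateOrUpdateAuthorizationRule usage; the AccessRights
+// constants are assumed from this module's generated constants file, and the
+// rule and resource names are placeholders:
+//
+//	rule := armeventhub.AuthorizationRule{
+//		Properties: &armeventhub.AuthorizationRuleProperties{
+//			Rights: []*armeventhub.AccessRights{to.Ptr(armeventhub.AccessRightsListen)},
+//		},
+//	}
+//	_, err := client.CreateOrUpdateAuthorizationRule(ctx, "rg", "ns", "listen-rule", rule, nil)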
+func (client *NamespacesClient) createOrUpdateAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters AuthorizationRule, options *NamespacesClientCreateOrUpdateAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateAuthorizationRuleHandleResponse handles the CreateOrUpdateAuthorizationRule response. +func (client *NamespacesClient) createOrUpdateAuthorizationRuleHandleResponse(resp *http.Response) (NamespacesClientCreateOrUpdateAuthorizationRuleResponse, error) { + result := NamespacesClientCreateOrUpdateAuthorizationRuleResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil { + return NamespacesClientCreateOrUpdateAuthorizationRuleResponse{}, err + } + return result, nil +} + +// CreateOrUpdateNetworkRuleSet - Create or update NetworkRuleSet for a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - parameters - The Namespace IpFilterRule. +// - options - NamespacesClientCreateOrUpdateNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.CreateOrUpdateNetworkRuleSet +// method. 
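+//
+// A sketch (the NetworkRuleSetProperties field and DefaultAction constant are
+// assumed from this module's generated models):
+//
+//	_, err := client.CreateOrUpdateNetworkRuleSet(ctx, "rg", "ns", armeventhub.NetworkRuleSet{
+//		Properties: &armeventhub.NetworkRuleSetProperties{
+//			DefaultAction: to.Ptr(armeventhub.DefaultActionDeny),
+//		},
+//	}, nil)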
+func (client *NamespacesClient) CreateOrUpdateNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet, options *NamespacesClientCreateOrUpdateNetworkRuleSetOptions) (NamespacesClientCreateOrUpdateNetworkRuleSetResponse, error) { + var err error + const operationName = "NamespacesClient.CreateOrUpdateNetworkRuleSet" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateNetworkRuleSetCreateRequest(ctx, resourceGroupName, namespaceName, parameters, options) + if err != nil { + return NamespacesClientCreateOrUpdateNetworkRuleSetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientCreateOrUpdateNetworkRuleSetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientCreateOrUpdateNetworkRuleSetResponse{}, err + } + resp, err := client.createOrUpdateNetworkRuleSetHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateNetworkRuleSetCreateRequest creates the CreateOrUpdateNetworkRuleSet request. +func (client *NamespacesClient) createOrUpdateNetworkRuleSetCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet, options *NamespacesClientCreateOrUpdateNetworkRuleSetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets/default" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateNetworkRuleSetHandleResponse handles the CreateOrUpdateNetworkRuleSet response. +func (client *NamespacesClient) createOrUpdateNetworkRuleSetHandleResponse(resp *http.Response) (NamespacesClientCreateOrUpdateNetworkRuleSetResponse, error) { + result := NamespacesClientCreateOrUpdateNetworkRuleSetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.NetworkRuleSet); err != nil { + return NamespacesClientCreateOrUpdateNetworkRuleSetResponse{}, err + } + return result, nil +} + +// BeginDelete - Deletes an existing namespace. This operation also removes all associated resources under the namespace. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - NamespacesClientBeginDeleteOptions contains the optional parameters for the NamespacesClient.BeginDelete method. +func (client *NamespacesClient) BeginDelete(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientBeginDeleteOptions) (*runtime.Poller[NamespacesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, namespaceName, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[NamespacesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[NamespacesClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Delete - Deletes an existing namespace. This operation also removes all associated resources under the namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +func (client *NamespacesClient) deleteOperation(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientBeginDeleteOptions) (*http.Response, error) { + var err error + const operationName = "NamespacesClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// deleteCreateRequest creates the Delete request. 
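+
+// The ResumeToken branch above lets a delete survive a process restart:
+// serialize the in-flight poller, then hand the token back to BeginDelete.
+// A sketch (error handling elided; the token must be taken before the
+// operation completes):
+//
+//	poller, _ := client.BeginDelete(ctx, "rg", "ns", nil)
+//	tok, _ := poller.ResumeToken()
+//	resumed, _ := client.BeginDelete(ctx, "rg", "ns",
+//		&armeventhub.NamespacesClientBeginDeleteOptions{ResumeToken: tok})
+//	_, _ = resumed.PollUntilDone(ctx, nil)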
+func (client *NamespacesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// DeleteAuthorizationRule - Deletes an AuthorizationRule for a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - authorizationRuleName - The authorization rule name. +// - options - NamespacesClientDeleteAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.DeleteAuthorizationRule +// method. +func (client *NamespacesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientDeleteAuthorizationRuleOptions) (NamespacesClientDeleteAuthorizationRuleResponse, error) { + var err error + const operationName = "NamespacesClient.DeleteAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, authorizationRuleName, options) + if err != nil { + return NamespacesClientDeleteAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientDeleteAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientDeleteAuthorizationRuleResponse{}, err + } + return NamespacesClientDeleteAuthorizationRuleResponse{}, nil +} + +// deleteAuthorizationRuleCreateRequest creates the DeleteAuthorizationRule request. 
+func (client *NamespacesClient) deleteAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientDeleteAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets the description of the specified namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - NamespacesClientGetOptions contains the optional parameters for the NamespacesClient.Get method. +func (client *NamespacesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientGetOptions) (NamespacesClientGetResponse, error) { + var err error + const operationName = "NamespacesClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, options) + if err != nil { + return NamespacesClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. 
+func (client *NamespacesClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *NamespacesClient) getHandleResponse(resp *http.Response) (NamespacesClientGetResponse, error) { + result := NamespacesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EHNamespace); err != nil { + return NamespacesClientGetResponse{}, err + } + return result, nil +} + +// GetAuthorizationRule - Gets an AuthorizationRule for a Namespace by rule name. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - authorizationRuleName - The authorization rule name. +// - options - NamespacesClientGetAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.GetAuthorizationRule +// method. +func (client *NamespacesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientGetAuthorizationRuleOptions) (NamespacesClientGetAuthorizationRuleResponse, error) { + var err error + const operationName = "NamespacesClient.GetAuthorizationRule" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getAuthorizationRuleCreateRequest(ctx, resourceGroupName, namespaceName, authorizationRuleName, options) + if err != nil { + return NamespacesClientGetAuthorizationRuleResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientGetAuthorizationRuleResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientGetAuthorizationRuleResponse{}, err + } + resp, err := client.getAuthorizationRuleHandleResponse(httpResp) + return resp, err +} + +// getAuthorizationRuleCreateRequest creates the GetAuthorizationRule request. 
+func (client *NamespacesClient) getAuthorizationRuleCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientGetAuthorizationRuleOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getAuthorizationRuleHandleResponse handles the GetAuthorizationRule response. +func (client *NamespacesClient) getAuthorizationRuleHandleResponse(resp *http.Response) (NamespacesClientGetAuthorizationRuleResponse, error) { + result := NamespacesClientGetAuthorizationRuleResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRule); err != nil { + return NamespacesClientGetAuthorizationRuleResponse{}, err + } + return result, nil +} + +// GetNetworkRuleSet - Gets NetworkRuleSet for a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - NamespacesClientGetNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.GetNetworkRuleSet +// method. 
+func (client *NamespacesClient) GetNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientGetNetworkRuleSetOptions) (NamespacesClientGetNetworkRuleSetResponse, error) { + var err error + const operationName = "NamespacesClient.GetNetworkRuleSet" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getNetworkRuleSetCreateRequest(ctx, resourceGroupName, namespaceName, options) + if err != nil { + return NamespacesClientGetNetworkRuleSetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientGetNetworkRuleSetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientGetNetworkRuleSetResponse{}, err + } + resp, err := client.getNetworkRuleSetHandleResponse(httpResp) + return resp, err +} + +// getNetworkRuleSetCreateRequest creates the GetNetworkRuleSet request. +func (client *NamespacesClient) getNetworkRuleSetCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientGetNetworkRuleSetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets/default" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getNetworkRuleSetHandleResponse handles the GetNetworkRuleSet response. +func (client *NamespacesClient) getNetworkRuleSetHandleResponse(resp *http.Response) (NamespacesClientGetNetworkRuleSetResponse, error) { + result := NamespacesClientGetNetworkRuleSetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.NetworkRuleSet); err != nil { + return NamespacesClientGetNetworkRuleSetResponse{}, err + } + return result, nil +} + +// NewListPager - Lists all the available Namespaces within a subscription, irrespective of the resource groups. +// +// Generated from API version 2021-11-01 +// - options - NamespacesClientListOptions contains the optional parameters for the NamespacesClient.NewListPager method. 
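+//
+// A consumption sketch, assuming ctx and a constructed client:
+//
+//	pager := client.NewListPager(nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//		}
+//		for _, ns := range page.Value {
+//			_ = ns // each entry is an *EHNamespace
+//		}
+//	}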
+func (client *NamespacesClient) NewListPager(options *NamespacesClientListOptions) *runtime.Pager[NamespacesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[NamespacesClientListResponse]{ + More: func(page NamespacesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *NamespacesClientListResponse) (NamespacesClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NamespacesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) + if err != nil { + return NamespacesClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. +func (client *NamespacesClient) listCreateRequest(ctx context.Context, options *NamespacesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/namespaces" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *NamespacesClient) listHandleResponse(resp *http.Response) (NamespacesClientListResponse, error) { + result := NamespacesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EHNamespaceListResult); err != nil { + return NamespacesClientListResponse{}, err + } + return result, nil +} + +// NewListAuthorizationRulesPager - Gets a list of authorization rules for a Namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - NamespacesClientListAuthorizationRulesOptions contains the optional parameters for the NamespacesClient.NewListAuthorizationRulesPager +// method. 
+func (client *NamespacesClient) NewListAuthorizationRulesPager(resourceGroupName string, namespaceName string, options *NamespacesClientListAuthorizationRulesOptions) *runtime.Pager[NamespacesClientListAuthorizationRulesResponse] { + return runtime.NewPager(runtime.PagingHandler[NamespacesClientListAuthorizationRulesResponse]{ + More: func(page NamespacesClientListAuthorizationRulesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *NamespacesClientListAuthorizationRulesResponse) (NamespacesClientListAuthorizationRulesResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NamespacesClient.NewListAuthorizationRulesPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listAuthorizationRulesCreateRequest(ctx, resourceGroupName, namespaceName, options) + }, nil) + if err != nil { + return NamespacesClientListAuthorizationRulesResponse{}, err + } + return client.listAuthorizationRulesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listAuthorizationRulesCreateRequest creates the ListAuthorizationRules request. +func (client *NamespacesClient) listAuthorizationRulesCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientListAuthorizationRulesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAuthorizationRulesHandleResponse handles the ListAuthorizationRules response. +func (client *NamespacesClient) listAuthorizationRulesHandleResponse(resp *http.Response) (NamespacesClientListAuthorizationRulesResponse, error) { + result := NamespacesClientListAuthorizationRulesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AuthorizationRuleListResult); err != nil { + return NamespacesClientListAuthorizationRulesResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists the available Namespaces within a resource group. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - options - NamespacesClientListByResourceGroupOptions contains the optional parameters for the NamespacesClient.NewListByResourceGroupPager +// method. 
+func (client *NamespacesClient) NewListByResourceGroupPager(resourceGroupName string, options *NamespacesClientListByResourceGroupOptions) *runtime.Pager[NamespacesClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[NamespacesClientListByResourceGroupResponse]{ + More: func(page NamespacesClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *NamespacesClientListByResourceGroupResponse) (NamespacesClientListByResourceGroupResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "NamespacesClient.NewListByResourceGroupPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + }, nil) + if err != nil { + return NamespacesClientListByResourceGroupResponse{}, err + } + return client.listByResourceGroupHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *NamespacesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *NamespacesClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *NamespacesClient) listByResourceGroupHandleResponse(resp *http.Response) (NamespacesClientListByResourceGroupResponse, error) { + result := NamespacesClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EHNamespaceListResult); err != nil { + return NamespacesClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// ListKeys - Gets the primary and secondary connection strings for the Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - authorizationRuleName - The authorization rule name. +// - options - NamespacesClientListKeysOptions contains the optional parameters for the NamespacesClient.ListKeys method. 
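Before the ListKeys implementation that follows, a minimal sketch of resolving a connection string through it; the helper name and the "RootManageSharedAccessKey" rule name are illustrative assumptions, not anything this patch requires:

```go
// Fetch the primary connection string for an authorization rule (sketch only).
func primaryConnectionString(ctx context.Context, client *armeventhub.NamespacesClient, resourceGroup, namespace string) (string, error) {
	resp, err := client.ListKeys(ctx, resourceGroup, namespace, "RootManageSharedAccessKey", nil)
	if err != nil {
		return "", err
	}
	if resp.PrimaryConnectionString == nil { // promoted from the embedded AccessKeys model
		return "", errors.New("listKeys returned no primary connection string")
	}
	return *resp.PrimaryConnectionString, nil
}
```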
+func (client *NamespacesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientListKeysOptions) (NamespacesClientListKeysResponse, error) { + var err error + const operationName = "NamespacesClient.ListKeys" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listKeysCreateRequest(ctx, resourceGroupName, namespaceName, authorizationRuleName, options) + if err != nil { + return NamespacesClientListKeysResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientListKeysResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientListKeysResponse{}, err + } + resp, err := client.listKeysHandleResponse(httpResp) + return resp, err +} + +// listKeysCreateRequest creates the ListKeys request. +func (client *NamespacesClient) listKeysCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, options *NamespacesClientListKeysOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}/listKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listKeysHandleResponse handles the ListKeys response. +func (client *NamespacesClient) listKeysHandleResponse(resp *http.Response) (NamespacesClientListKeysResponse, error) { + result := NamespacesClientListKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil { + return NamespacesClientListKeysResponse{}, err + } + return result, nil +} + +// ListNetworkRuleSet - Gets NetworkRuleSet for a Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - NamespacesClientListNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.ListNetworkRuleSet +// method. 
+func (client *NamespacesClient) ListNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientListNetworkRuleSetOptions) (NamespacesClientListNetworkRuleSetResponse, error) { + var err error + const operationName = "NamespacesClient.ListNetworkRuleSet" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.listNetworkRuleSetCreateRequest(ctx, resourceGroupName, namespaceName, options) + if err != nil { + return NamespacesClientListNetworkRuleSetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientListNetworkRuleSetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientListNetworkRuleSetResponse{}, err + } + resp, err := client.listNetworkRuleSetHandleResponse(httpResp) + return resp, err +} + +// listNetworkRuleSetCreateRequest creates the ListNetworkRuleSet request. +func (client *NamespacesClient) listNetworkRuleSetCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *NamespacesClientListNetworkRuleSetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listNetworkRuleSetHandleResponse handles the ListNetworkRuleSet response. +func (client *NamespacesClient) listNetworkRuleSetHandleResponse(resp *http.Response) (NamespacesClientListNetworkRuleSetResponse, error) { + result := NamespacesClientListNetworkRuleSetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.NetworkRuleSetListResult); err != nil { + return NamespacesClientListNetworkRuleSetResponse{}, err + } + return result, nil +} + +// RegenerateKeys - Regenerates the primary or secondary connection strings for the specified Namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - authorizationRuleName - The authorization rule name. +// - parameters - Parameters required to regenerate the connection string. +// - options - NamespacesClientRegenerateKeysOptions contains the optional parameters for the NamespacesClient.RegenerateKeys +// method. 
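A sketch of the RegenerateKeys call documented above, whose implementation follows; the helper name is an assumption, and to.Ptr comes from github.com/Azure/azure-sdk-for-go/sdk/azcore/to:

```go
// Rotate the primary key of an authorization rule (sketch only).
func rotatePrimaryKey(ctx context.Context, client *armeventhub.NamespacesClient, resourceGroup, namespace, rule string) (armeventhub.AccessKeys, error) {
	params := armeventhub.RegenerateAccessKeyParameters{
		KeyType: to.Ptr(armeventhub.KeyTypePrimaryKey), // KeyTypeSecondaryKey rotates the other key
	}
	resp, err := client.RegenerateKeys(ctx, resourceGroup, namespace, rule, params, nil)
	if err != nil {
		return armeventhub.AccessKeys{}, err
	}
	return resp.AccessKeys, nil
}
```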
+func (client *NamespacesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters, options *NamespacesClientRegenerateKeysOptions) (NamespacesClientRegenerateKeysResponse, error) { + var err error + const operationName = "NamespacesClient.RegenerateKeys" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.regenerateKeysCreateRequest(ctx, resourceGroupName, namespaceName, authorizationRuleName, parameters, options) + if err != nil { + return NamespacesClientRegenerateKeysResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientRegenerateKeysResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientRegenerateKeysResponse{}, err + } + resp, err := client.regenerateKeysHandleResponse(httpResp) + return resp, err +} + +// regenerateKeysCreateRequest creates the RegenerateKeys request. +func (client *NamespacesClient) regenerateKeysCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters, options *NamespacesClientRegenerateKeysOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}/regenerateKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if authorizationRuleName == "" { + return nil, errors.New("parameter authorizationRuleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{authorizationRuleName}", url.PathEscape(authorizationRuleName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// regenerateKeysHandleResponse handles the RegenerateKeys response. +func (client *NamespacesClient) regenerateKeysHandleResponse(resp *http.Response) (NamespacesClientRegenerateKeysResponse, error) { + result := NamespacesClientRegenerateKeysResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AccessKeys); err != nil { + return NamespacesClientRegenerateKeysResponse{}, err + } + return result, nil +} + +// Update - Creates or updates a namespace. Once created, this namespace's resource manifest is immutable. This operation +// is idempotent. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - parameters - Parameters for updating a namespace resource. +// - options - NamespacesClientUpdateOptions contains the optional parameters for the NamespacesClient.Update method. +func (client *NamespacesClient) Update(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace, options *NamespacesClientUpdateOptions) (NamespacesClientUpdateResponse, error) { + var err error + const operationName = "NamespacesClient.Update" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateCreateRequest(ctx, resourceGroupName, namespaceName, parameters, options) + if err != nil { + return NamespacesClientUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return NamespacesClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return NamespacesClientUpdateResponse{}, err + } + resp, err := client.updateHandleResponse(httpResp) + return resp, err +} + +// updateCreateRequest creates the Update request. +func (client *NamespacesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace, options *NamespacesClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// updateHandleResponse handles the Update response. 
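Because Update issues a PATCH, only the fields set on the EHNamespace argument are changed; a sketch with an assumed helper name and placeholder tag values (to.Ptr again from the azcore to package):

```go
// Add a tag to a namespace without touching its other properties (sketch only).
func tagNamespace(ctx context.Context, client *armeventhub.NamespacesClient, resourceGroup, namespace string) error {
	_, err := client.Update(ctx, resourceGroup, namespace, armeventhub.EHNamespace{
		Tags: map[string]*string{"owner": to.Ptr("platform-team")}, // placeholder tag
	}, nil)
	return err
}
```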
+func (client *NamespacesClient) updateHandleResponse(resp *http.Response) (NamespacesClientUpdateResponse, error) { + result := NamespacesClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.EHNamespace); err != nil { + return NamespacesClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/operations_client.go new file mode 100644 index 00000000000..3f9c220bef5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/operations_client.go @@ -0,0 +1,88 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" +) + +// OperationsClient contains the methods for the Operations group. +// Don't use this type directly, use NewOperationsClient() instead. +type OperationsClient struct { + internal *arm.Client +} + +// NewOperationsClient creates a new instance of OperationsClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*OperationsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &OperationsClient{ + internal: cl, + } + return client, nil +} + +// NewListPager - Lists all of the available Event Hub REST API operations. +// +// Generated from API version 2021-11-01 +// - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. +func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ + More: func(page OperationsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *OperationsClientListResponse) (OperationsClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "OperationsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, options) + }, nil) + if err != nil { + return OperationsClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. 
+func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsClientListOptions) (*policy.Request, error) { + urlPath := "/providers/Microsoft.EventHub/operations" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsClientListResponse, error) { + result := OperationsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil { + return OperationsClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/options.go new file mode 100644 index 00000000000..946453499e4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/options.go @@ -0,0 +1,367 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +// ClustersClientBeginCreateOrUpdateOptions contains the optional parameters for the ClustersClient.BeginCreateOrUpdate method. +type ClustersClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ClustersClientBeginDeleteOptions contains the optional parameters for the ClustersClient.BeginDelete method. +type ClustersClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ClustersClientBeginUpdateOptions contains the optional parameters for the ClustersClient.BeginUpdate method. +type ClustersClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ClustersClientGetOptions contains the optional parameters for the ClustersClient.Get method. +type ClustersClientGetOptions struct { + // placeholder for future optional parameters +} + +// ClustersClientListAvailableClusterRegionOptions contains the optional parameters for the ClustersClient.ListAvailableClusterRegion +// method. +type ClustersClientListAvailableClusterRegionOptions struct { + // placeholder for future optional parameters +} + +// ClustersClientListByResourceGroupOptions contains the optional parameters for the ClustersClient.NewListByResourceGroupPager +// method. +type ClustersClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// ClustersClientListBySubscriptionOptions contains the optional parameters for the ClustersClient.NewListBySubscriptionPager +// method. +type ClustersClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} + +// ClustersClientListNamespacesOptions contains the optional parameters for the ClustersClient.ListNamespaces method. 
+type ClustersClientListNamespacesOptions struct { + // placeholder for future optional parameters +} + +// ConfigurationClientGetOptions contains the optional parameters for the ConfigurationClient.Get method. +type ConfigurationClientGetOptions struct { + // placeholder for future optional parameters +} + +// ConfigurationClientPatchOptions contains the optional parameters for the ConfigurationClient.Patch method. +type ConfigurationClientPatchOptions struct { + // placeholder for future optional parameters +} + +// ConsumerGroupsClientCreateOrUpdateOptions contains the optional parameters for the ConsumerGroupsClient.CreateOrUpdate +// method. +type ConsumerGroupsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// ConsumerGroupsClientDeleteOptions contains the optional parameters for the ConsumerGroupsClient.Delete method. +type ConsumerGroupsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// ConsumerGroupsClientGetOptions contains the optional parameters for the ConsumerGroupsClient.Get method. +type ConsumerGroupsClientGetOptions struct { + // placeholder for future optional parameters +} + +// ConsumerGroupsClientListByEventHubOptions contains the optional parameters for the ConsumerGroupsClient.NewListByEventHubPager +// method. +type ConsumerGroupsClientListByEventHubOptions struct { + // Skip is only used if a previous operation returned a partial result. If a previous response contains a nextLink element, + // the value of the nextLink element will include a skip parameter that specifies + // a starting point to use for subsequent calls. + Skip *int32 + + // May be used to limit the number of results to the most recent N usageDetails. + Top *int32 +} + +// DisasterRecoveryConfigsClientBreakPairingOptions contains the optional parameters for the DisasterRecoveryConfigsClient.BreakPairing +// method. +type DisasterRecoveryConfigsClientBreakPairingOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientCheckNameAvailabilityOptions contains the optional parameters for the DisasterRecoveryConfigsClient.CheckNameAvailability +// method. +type DisasterRecoveryConfigsClientCheckNameAvailabilityOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientCreateOrUpdateOptions contains the optional parameters for the DisasterRecoveryConfigsClient.CreateOrUpdate +// method. +type DisasterRecoveryConfigsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientDeleteOptions contains the optional parameters for the DisasterRecoveryConfigsClient.Delete +// method. +type DisasterRecoveryConfigsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientFailOverOptions contains the optional parameters for the DisasterRecoveryConfigsClient.FailOver +// method. +type DisasterRecoveryConfigsClientFailOverOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientGetAuthorizationRuleOptions contains the optional parameters for the DisasterRecoveryConfigsClient.GetAuthorizationRule +// method. +type DisasterRecoveryConfigsClientGetAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientGetOptions contains the optional parameters for the DisasterRecoveryConfigsClient.Get method. 
+type DisasterRecoveryConfigsClientGetOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientListAuthorizationRulesOptions contains the optional parameters for the DisasterRecoveryConfigsClient.NewListAuthorizationRulesPager +// method. +type DisasterRecoveryConfigsClientListAuthorizationRulesOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientListKeysOptions contains the optional parameters for the DisasterRecoveryConfigsClient.ListKeys +// method. +type DisasterRecoveryConfigsClientListKeysOptions struct { + // placeholder for future optional parameters +} + +// DisasterRecoveryConfigsClientListOptions contains the optional parameters for the DisasterRecoveryConfigsClient.NewListPager +// method. +type DisasterRecoveryConfigsClientListOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientCreateOrUpdateAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.CreateOrUpdateAuthorizationRule +// method. +type EventHubsClientCreateOrUpdateAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientCreateOrUpdateOptions contains the optional parameters for the EventHubsClient.CreateOrUpdate method. +type EventHubsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientDeleteAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.DeleteAuthorizationRule +// method. +type EventHubsClientDeleteAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientDeleteOptions contains the optional parameters for the EventHubsClient.Delete method. +type EventHubsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientGetAuthorizationRuleOptions contains the optional parameters for the EventHubsClient.GetAuthorizationRule +// method. +type EventHubsClientGetAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientGetOptions contains the optional parameters for the EventHubsClient.Get method. +type EventHubsClientGetOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientListAuthorizationRulesOptions contains the optional parameters for the EventHubsClient.NewListAuthorizationRulesPager +// method. +type EventHubsClientListAuthorizationRulesOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientListByNamespaceOptions contains the optional parameters for the EventHubsClient.NewListByNamespacePager +// method. +type EventHubsClientListByNamespaceOptions struct { + // Skip is only used if a previous operation returned a partial result. If a previous response contains a nextLink element, + // the value of the nextLink element will include a skip parameter that specifies + // a starting point to use for subsequent calls. + Skip *int32 + + // May be used to limit the number of results to the most recent N usageDetails. + Top *int32 +} + +// EventHubsClientListKeysOptions contains the optional parameters for the EventHubsClient.ListKeys method. +type EventHubsClientListKeysOptions struct { + // placeholder for future optional parameters +} + +// EventHubsClientRegenerateKeysOptions contains the optional parameters for the EventHubsClient.RegenerateKeys method. 
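The Skip/Top fields on the list options above become paging query parameters on the underlying List request; a sketch of capping the first page of event hubs (the client variable, helper name, and page size are assumptions):

```go
// Request at most 100 event hubs in the first page (sketch only).
func firstHubsPage(ctx context.Context, hubs *armeventhub.EventHubsClient, resourceGroup, namespace string) ([]*armeventhub.Eventhub, error) {
	opts := &armeventhub.EventHubsClientListByNamespaceOptions{
		Top: to.Ptr[int32](100), // Skip is normally carried back to the service via nextLink
	}
	page, err := hubs.NewListByNamespacePager(resourceGroup, namespace, opts).NextPage(ctx)
	if err != nil {
		return nil, err
	}
	return page.Value, nil
}
```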
+type EventHubsClientRegenerateKeysOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientBeginCreateOrUpdateOptions contains the optional parameters for the NamespacesClient.BeginCreateOrUpdate +// method. +type NamespacesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// NamespacesClientBeginDeleteOptions contains the optional parameters for the NamespacesClient.BeginDelete method. +type NamespacesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// NamespacesClientCheckNameAvailabilityOptions contains the optional parameters for the NamespacesClient.CheckNameAvailability +// method. +type NamespacesClientCheckNameAvailabilityOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientCreateOrUpdateAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.CreateOrUpdateAuthorizationRule +// method. +type NamespacesClientCreateOrUpdateAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientCreateOrUpdateNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.CreateOrUpdateNetworkRuleSet +// method. +type NamespacesClientCreateOrUpdateNetworkRuleSetOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientDeleteAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.DeleteAuthorizationRule +// method. +type NamespacesClientDeleteAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientGetAuthorizationRuleOptions contains the optional parameters for the NamespacesClient.GetAuthorizationRule +// method. +type NamespacesClientGetAuthorizationRuleOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientGetNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.GetNetworkRuleSet method. +type NamespacesClientGetNetworkRuleSetOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientGetOptions contains the optional parameters for the NamespacesClient.Get method. +type NamespacesClientGetOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientListAuthorizationRulesOptions contains the optional parameters for the NamespacesClient.NewListAuthorizationRulesPager +// method. +type NamespacesClientListAuthorizationRulesOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientListByResourceGroupOptions contains the optional parameters for the NamespacesClient.NewListByResourceGroupPager +// method. +type NamespacesClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientListKeysOptions contains the optional parameters for the NamespacesClient.ListKeys method. +type NamespacesClientListKeysOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientListNetworkRuleSetOptions contains the optional parameters for the NamespacesClient.ListNetworkRuleSet +// method. +type NamespacesClientListNetworkRuleSetOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientListOptions contains the optional parameters for the NamespacesClient.NewListPager method. 
+type NamespacesClientListOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientRegenerateKeysOptions contains the optional parameters for the NamespacesClient.RegenerateKeys method. +type NamespacesClientRegenerateKeysOptions struct { + // placeholder for future optional parameters +} + +// NamespacesClientUpdateOptions contains the optional parameters for the NamespacesClient.Update method. +type NamespacesClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. +type OperationsClientListOptions struct { + // placeholder for future optional parameters +} + +// PrivateEndpointConnectionsClientBeginDeleteOptions contains the optional parameters for the PrivateEndpointConnectionsClient.BeginDelete +// method. +type PrivateEndpointConnectionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PrivateEndpointConnectionsClientCreateOrUpdateOptions contains the optional parameters for the PrivateEndpointConnectionsClient.CreateOrUpdate +// method. +type PrivateEndpointConnectionsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// PrivateEndpointConnectionsClientGetOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Get +// method. +type PrivateEndpointConnectionsClientGetOptions struct { + // placeholder for future optional parameters +} + +// PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.NewListPager +// method. +type PrivateEndpointConnectionsClientListOptions struct { + // placeholder for future optional parameters +} + +// PrivateLinkResourcesClientGetOptions contains the optional parameters for the PrivateLinkResourcesClient.Get method. +type PrivateLinkResourcesClientGetOptions struct { + // placeholder for future optional parameters +} + +// SchemaRegistryClientCreateOrUpdateOptions contains the optional parameters for the SchemaRegistryClient.CreateOrUpdate +// method. +type SchemaRegistryClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// SchemaRegistryClientDeleteOptions contains the optional parameters for the SchemaRegistryClient.Delete method. +type SchemaRegistryClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// SchemaRegistryClientGetOptions contains the optional parameters for the SchemaRegistryClient.Get method. +type SchemaRegistryClientGetOptions struct { + // placeholder for future optional parameters +} + +// SchemaRegistryClientListByNamespaceOptions contains the optional parameters for the SchemaRegistryClient.NewListByNamespacePager +// method. +type SchemaRegistryClientListByNamespaceOptions struct { + // Skip is only used if a previous operation returned a partial result. If a previous response contains a nextLink element, + // the value of the nextLink element will include a skip parameter that specifies + // a starting point to use for subsequent calls. + Skip *int32 + + // May be used to limit the number of results to the most recent N usageDetails. 
+ Top *int32 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_client.go new file mode 100644 index 00000000000..fd3cbd1260a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_client.go @@ -0,0 +1,335 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// PrivateEndpointConnectionsClient contains the methods for the PrivateEndpointConnections group. +// Don't use this type directly, use NewPrivateEndpointConnectionsClient() instead. +type PrivateEndpointConnectionsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewPrivateEndpointConnectionsClient creates a new instance of PrivateEndpointConnectionsClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewPrivateEndpointConnectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrivateEndpointConnectionsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &PrivateEndpointConnectionsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrUpdate - Creates or updates PrivateEndpointConnections of service namespace. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - privateEndpointConnectionName - The PrivateEndpointConnection name +// - parameters - Parameters supplied to update Status of PrivateEndPoint Connection to namespace resource. +// - options - PrivateEndpointConnectionsClientCreateOrUpdateOptions contains the optional parameters for the PrivateEndpointConnectionsClient.CreateOrUpdate +// method. 
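A sketch of a typical approval flow through the CreateOrUpdate method documented above (its implementation follows); the status and description values are assumptions about how an operator might approve a pending connection:

```go
// Approve a pending private endpoint connection (sketch only).
func approveConnection(ctx context.Context, client *armeventhub.PrivateEndpointConnectionsClient, resourceGroup, namespace, connName string) error {
	_, err := client.CreateOrUpdate(ctx, resourceGroup, namespace, connName, armeventhub.PrivateEndpointConnection{
		Properties: &armeventhub.PrivateEndpointConnectionProperties{
			PrivateLinkServiceConnectionState: &armeventhub.ConnectionState{
				Status:      to.Ptr(armeventhub.PrivateLinkConnectionStatusApproved),
				Description: to.Ptr("approved by automation"), // placeholder text
			},
		},
	}, nil)
	return err
}
```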
+func (client *PrivateEndpointConnectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, parameters PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (PrivateEndpointConnectionsClientCreateOrUpdateResponse, error) { + var err error + const operationName = "PrivateEndpointConnectionsClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, privateEndpointConnectionName, parameters, options) + if err != nil { + return PrivateEndpointConnectionsClientCreateOrUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrivateEndpointConnectionsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PrivateEndpointConnectionsClientCreateOrUpdateResponse{}, err + } + resp, err := client.createOrUpdateHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *PrivateEndpointConnectionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, parameters PrivateEndpointConnection, options *PrivateEndpointConnectionsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if privateEndpointConnectionName == "" { + return nil, errors.New("parameter privateEndpointConnectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. 
+func (client *PrivateEndpointConnectionsClient) createOrUpdateHandleResponse(resp *http.Response) (PrivateEndpointConnectionsClientCreateOrUpdateResponse, error) {
+	result := PrivateEndpointConnectionsClientCreateOrUpdateResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnection); err != nil {
+		return PrivateEndpointConnectionsClientCreateOrUpdateResponse{}, err
+	}
+	return result, nil
+}
+
+// BeginDelete - Deletes an existing Private Endpoint Connection from the namespace.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+// - resourceGroupName - Name of the resource group within the azure subscription.
+// - namespaceName - The Namespace name
+// - privateEndpointConnectionName - The PrivateEndpointConnection name
+// - options - PrivateEndpointConnectionsClientBeginDeleteOptions contains the optional parameters for the PrivateEndpointConnectionsClient.BeginDelete
+// method.
+func (client *PrivateEndpointConnectionsClient) BeginDelete(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*runtime.Poller[PrivateEndpointConnectionsClientDeleteResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.deleteOperation(ctx, resourceGroupName, namespaceName, privateEndpointConnectionName, options)
+		if err != nil {
+			return nil, err
+		}
+		poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrivateEndpointConnectionsClientDeleteResponse]{
+			Tracer: client.internal.Tracer(),
+		})
+		return poller, err
+	} else {
+		return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[PrivateEndpointConnectionsClientDeleteResponse]{
+			Tracer: client.internal.Tracer(),
+		})
+	}
+}
+
+// Delete - Deletes an existing Private Endpoint Connection from the namespace.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+func (client *PrivateEndpointConnectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*http.Response, error) {
+	var err error
+	const operationName = "PrivateEndpointConnectionsClient.BeginDelete"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, privateEndpointConnectionName, options)
+	if err != nil {
+		return nil, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) {
+		err = runtime.NewResponseError(httpResp)
+		return nil, err
+	}
+	return httpResp, nil
+}
+
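BeginDelete hands back an LRO poller rather than a finished result; a sketch of waiting for the delete to reach a terminal state (helper name assumed):

```go
// Delete a private endpoint connection and block until the LRO completes (sketch only).
func deleteConnection(ctx context.Context, client *armeventhub.PrivateEndpointConnectionsClient, resourceGroup, namespace, connName string) error {
	poller, err := client.BeginDelete(ctx, resourceGroup, namespace, connName, nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil) // nil accepts the default polling frequency
	return err
}
```

+// deleteCreateRequest creates the Delete request.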
+func (client *PrivateEndpointConnectionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if privateEndpointConnectionName == "" { + return nil, errors.New("parameter privateEndpointConnectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets a description for the specified Private Endpoint Connection name. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - privateEndpointConnectionName - The PrivateEndpointConnection name +// - options - PrivateEndpointConnectionsClientGetOptions contains the optional parameters for the PrivateEndpointConnectionsClient.Get +// method. +func (client *PrivateEndpointConnectionsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientGetOptions) (PrivateEndpointConnectionsClientGetResponse, error) { + var err error + const operationName = "PrivateEndpointConnectionsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, privateEndpointConnectionName, options) + if err != nil { + return PrivateEndpointConnectionsClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrivateEndpointConnectionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PrivateEndpointConnectionsClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. 
+func (client *PrivateEndpointConnectionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, privateEndpointConnectionName string, options *PrivateEndpointConnectionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if privateEndpointConnectionName == "" { + return nil, errors.New("parameter privateEndpointConnectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *PrivateEndpointConnectionsClient) getHandleResponse(resp *http.Response) (PrivateEndpointConnectionsClientGetResponse, error) { + result := PrivateEndpointConnectionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnection); err != nil { + return PrivateEndpointConnectionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Gets the available PrivateEndpointConnections within a namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - PrivateEndpointConnectionsClientListOptions contains the optional parameters for the PrivateEndpointConnectionsClient.NewListPager +// method. 
+func (client *PrivateEndpointConnectionsClient) NewListPager(resourceGroupName string, namespaceName string, options *PrivateEndpointConnectionsClientListOptions) *runtime.Pager[PrivateEndpointConnectionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[PrivateEndpointConnectionsClientListResponse]{ + More: func(page PrivateEndpointConnectionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *PrivateEndpointConnectionsClientListResponse) (PrivateEndpointConnectionsClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "PrivateEndpointConnectionsClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, namespaceName, options) + }, nil) + if err != nil { + return PrivateEndpointConnectionsClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. +func (client *PrivateEndpointConnectionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *PrivateEndpointConnectionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *PrivateEndpointConnectionsClient) listHandleResponse(resp *http.Response) (PrivateEndpointConnectionsClientListResponse, error) { + result := PrivateEndpointConnectionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnectionListResult); err != nil { + return PrivateEndpointConnectionsClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privatelinkresources_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privatelinkresources_client.go new file mode 100644 index 00000000000..a67286f1017 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/privatelinkresources_client.go @@ -0,0 +1,110 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package armeventhub
+
+import (
+	"context"
+	"errors"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// PrivateLinkResourcesClient contains the methods for the PrivateLinkResources group.
+// Don't use this type directly, use NewPrivateLinkResourcesClient() instead.
+type PrivateLinkResourcesClient struct {
+	internal *arm.Client
+	subscriptionID string
+}
+
+// NewPrivateLinkResourcesClient creates a new instance of PrivateLinkResourcesClient with the specified values.
+// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms
+// part of the URI for every service call.
+// - credential - used to authorize requests. Usually a credential from azidentity.
+// - options - pass nil to accept the default values.
+func NewPrivateLinkResourcesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrivateLinkResourcesClient, error) {
+	cl, err := arm.NewClient(moduleName, moduleVersion, credential, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &PrivateLinkResourcesClient{
+		subscriptionID: subscriptionID,
+		internal: cl,
+	}
+	return client, nil
+}
+
+// Get - Gets the list of resources that support Private Links.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2021-11-01
+// - resourceGroupName - Name of the resource group within the azure subscription.
+// - namespaceName - The Namespace name
+// - options - PrivateLinkResourcesClientGetOptions contains the optional parameters for the PrivateLinkResourcesClient.Get
+// method.
+func (client *PrivateLinkResourcesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, options *PrivateLinkResourcesClientGetOptions) (PrivateLinkResourcesClientGetResponse, error) {
+	var err error
+	const operationName = "PrivateLinkResourcesClient.Get"
+	ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName)
+	ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil)
+	defer func() { endSpan(err) }()
+	req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, options)
+	if err != nil {
+		return PrivateLinkResourcesClientGetResponse{}, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return PrivateLinkResourcesClientGetResponse{}, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return PrivateLinkResourcesClientGetResponse{}, err
+	}
+	resp, err := client.getHandleResponse(httpResp)
+	return resp, err
+}
+
+// getCreateRequest creates the Get request.
+func (client *PrivateLinkResourcesClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *PrivateLinkResourcesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/privateLinkResources" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *PrivateLinkResourcesClient) getHandleResponse(resp *http.Response) (PrivateLinkResourcesClientGetResponse, error) { + result := PrivateLinkResourcesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrivateLinkResourcesListResult); err != nil { + return PrivateLinkResourcesClientGetResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/response_types.go new file mode 100644 index 00000000000..8a42c0df4c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/response_types.go @@ -0,0 +1,359 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +// ClustersClientCreateOrUpdateResponse contains the response from method ClustersClient.BeginCreateOrUpdate. +type ClustersClientCreateOrUpdateResponse struct { + // Single Event Hubs Cluster resource in List or Get operations. + Cluster +} + +// ClustersClientDeleteResponse contains the response from method ClustersClient.BeginDelete. +type ClustersClientDeleteResponse struct { + // placeholder for future response values +} + +// ClustersClientGetResponse contains the response from method ClustersClient.Get. +type ClustersClientGetResponse struct { + // Single Event Hubs Cluster resource in List or Get operations. + Cluster +} + +// ClustersClientListAvailableClusterRegionResponse contains the response from method ClustersClient.ListAvailableClusterRegion. +type ClustersClientListAvailableClusterRegionResponse struct { + // The response of the List Available Clusters operation. 
+ AvailableClustersList +} + +// ClustersClientListByResourceGroupResponse contains the response from method ClustersClient.NewListByResourceGroupPager. +type ClustersClientListByResourceGroupResponse struct { + // The response of the List Event Hubs Clusters operation. + ClusterListResult +} + +// ClustersClientListBySubscriptionResponse contains the response from method ClustersClient.NewListBySubscriptionPager. +type ClustersClientListBySubscriptionResponse struct { + // The response of the List Event Hubs Clusters operation. + ClusterListResult +} + +// ClustersClientListNamespacesResponse contains the response from method ClustersClient.ListNamespaces. +type ClustersClientListNamespacesResponse struct { + // The response of the List Namespace IDs operation + EHNamespaceIDListResult +} + +// ClustersClientUpdateResponse contains the response from method ClustersClient.BeginUpdate. +type ClustersClientUpdateResponse struct { + // Single Event Hubs Cluster resource in List or Get operations. + Cluster +} + +// ConfigurationClientGetResponse contains the response from method ConfigurationClient.Get. +type ConfigurationClientGetResponse struct { + // Contains all settings for the cluster. + ClusterQuotaConfigurationProperties +} + +// ConfigurationClientPatchResponse contains the response from method ConfigurationClient.Patch. +type ConfigurationClientPatchResponse struct { + // Contains all settings for the cluster. + ClusterQuotaConfigurationProperties +} + +// ConsumerGroupsClientCreateOrUpdateResponse contains the response from method ConsumerGroupsClient.CreateOrUpdate. +type ConsumerGroupsClientCreateOrUpdateResponse struct { + // Single item in List or Get Consumer group operation + ConsumerGroup +} + +// ConsumerGroupsClientDeleteResponse contains the response from method ConsumerGroupsClient.Delete. +type ConsumerGroupsClientDeleteResponse struct { + // placeholder for future response values +} + +// ConsumerGroupsClientGetResponse contains the response from method ConsumerGroupsClient.Get. +type ConsumerGroupsClientGetResponse struct { + // Single item in List or Get Consumer group operation + ConsumerGroup +} + +// ConsumerGroupsClientListByEventHubResponse contains the response from method ConsumerGroupsClient.NewListByEventHubPager. +type ConsumerGroupsClientListByEventHubResponse struct { + // The result to the List Consumer Group operation. + ConsumerGroupListResult +} + +// DisasterRecoveryConfigsClientBreakPairingResponse contains the response from method DisasterRecoveryConfigsClient.BreakPairing. +type DisasterRecoveryConfigsClientBreakPairingResponse struct { + // placeholder for future response values +} + +// DisasterRecoveryConfigsClientCheckNameAvailabilityResponse contains the response from method DisasterRecoveryConfigsClient.CheckNameAvailability. +type DisasterRecoveryConfigsClientCheckNameAvailabilityResponse struct { + // The Result of the CheckNameAvailability operation + CheckNameAvailabilityResult +} + +// DisasterRecoveryConfigsClientCreateOrUpdateResponse contains the response from method DisasterRecoveryConfigsClient.CreateOrUpdate. +type DisasterRecoveryConfigsClientCreateOrUpdateResponse struct { + // Single item in List or Get Alias(Disaster Recovery configuration) operation + ArmDisasterRecovery +} + +// DisasterRecoveryConfigsClientDeleteResponse contains the response from method DisasterRecoveryConfigsClient.Delete. 
+type DisasterRecoveryConfigsClientDeleteResponse struct { + // placeholder for future response values +} + +// DisasterRecoveryConfigsClientFailOverResponse contains the response from method DisasterRecoveryConfigsClient.FailOver. +type DisasterRecoveryConfigsClientFailOverResponse struct { + // placeholder for future response values +} + +// DisasterRecoveryConfigsClientGetAuthorizationRuleResponse contains the response from method DisasterRecoveryConfigsClient.GetAuthorizationRule. +type DisasterRecoveryConfigsClientGetAuthorizationRuleResponse struct { + // Single item in a List or Get AuthorizationRule operation + AuthorizationRule +} + +// DisasterRecoveryConfigsClientGetResponse contains the response from method DisasterRecoveryConfigsClient.Get. +type DisasterRecoveryConfigsClientGetResponse struct { + // Single item in List or Get Alias(Disaster Recovery configuration) operation + ArmDisasterRecovery +} + +// DisasterRecoveryConfigsClientListAuthorizationRulesResponse contains the response from method DisasterRecoveryConfigsClient.NewListAuthorizationRulesPager. +type DisasterRecoveryConfigsClientListAuthorizationRulesResponse struct { + // The response from the List namespace operation. + AuthorizationRuleListResult +} + +// DisasterRecoveryConfigsClientListKeysResponse contains the response from method DisasterRecoveryConfigsClient.ListKeys. +type DisasterRecoveryConfigsClientListKeysResponse struct { + // Namespace/EventHub Connection String + AccessKeys +} + +// DisasterRecoveryConfigsClientListResponse contains the response from method DisasterRecoveryConfigsClient.NewListPager. +type DisasterRecoveryConfigsClientListResponse struct { + // The result of the List Alias(Disaster Recovery configuration) operation. + ArmDisasterRecoveryListResult +} + +// EventHubsClientCreateOrUpdateAuthorizationRuleResponse contains the response from method EventHubsClient.CreateOrUpdateAuthorizationRule. +type EventHubsClientCreateOrUpdateAuthorizationRuleResponse struct { + // Single item in a List or Get AuthorizationRule operation + AuthorizationRule +} + +// EventHubsClientCreateOrUpdateResponse contains the response from method EventHubsClient.CreateOrUpdate. +type EventHubsClientCreateOrUpdateResponse struct { + // Single item in List or Get Event Hub operation + Eventhub +} + +// EventHubsClientDeleteAuthorizationRuleResponse contains the response from method EventHubsClient.DeleteAuthorizationRule. +type EventHubsClientDeleteAuthorizationRuleResponse struct { + // placeholder for future response values +} + +// EventHubsClientDeleteResponse contains the response from method EventHubsClient.Delete. +type EventHubsClientDeleteResponse struct { + // placeholder for future response values +} + +// EventHubsClientGetAuthorizationRuleResponse contains the response from method EventHubsClient.GetAuthorizationRule. +type EventHubsClientGetAuthorizationRuleResponse struct { + // Single item in a List or Get AuthorizationRule operation + AuthorizationRule +} + +// EventHubsClientGetResponse contains the response from method EventHubsClient.Get. +type EventHubsClientGetResponse struct { + // Single item in List or Get Event Hub operation + Eventhub +} + +// EventHubsClientListAuthorizationRulesResponse contains the response from method EventHubsClient.NewListAuthorizationRulesPager. +type EventHubsClientListAuthorizationRulesResponse struct { + // The response from the List namespace operation. 
+ AuthorizationRuleListResult +} + +// EventHubsClientListByNamespaceResponse contains the response from method EventHubsClient.NewListByNamespacePager. +type EventHubsClientListByNamespaceResponse struct { + // The result of the List EventHubs operation. + ListResult +} + +// EventHubsClientListKeysResponse contains the response from method EventHubsClient.ListKeys. +type EventHubsClientListKeysResponse struct { + // Namespace/EventHub Connection String + AccessKeys +} + +// EventHubsClientRegenerateKeysResponse contains the response from method EventHubsClient.RegenerateKeys. +type EventHubsClientRegenerateKeysResponse struct { + // Namespace/EventHub Connection String + AccessKeys +} + +// NamespacesClientCheckNameAvailabilityResponse contains the response from method NamespacesClient.CheckNameAvailability. +type NamespacesClientCheckNameAvailabilityResponse struct { + // The Result of the CheckNameAvailability operation + CheckNameAvailabilityResult +} + +// NamespacesClientCreateOrUpdateAuthorizationRuleResponse contains the response from method NamespacesClient.CreateOrUpdateAuthorizationRule. +type NamespacesClientCreateOrUpdateAuthorizationRuleResponse struct { + // Single item in a List or Get AuthorizationRule operation + AuthorizationRule +} + +// NamespacesClientCreateOrUpdateNetworkRuleSetResponse contains the response from method NamespacesClient.CreateOrUpdateNetworkRuleSet. +type NamespacesClientCreateOrUpdateNetworkRuleSetResponse struct { + // Description of topic resource. + NetworkRuleSet +} + +// NamespacesClientCreateOrUpdateResponse contains the response from method NamespacesClient.BeginCreateOrUpdate. +type NamespacesClientCreateOrUpdateResponse struct { + // Single Namespace item in List or Get Operation + EHNamespace +} + +// NamespacesClientDeleteAuthorizationRuleResponse contains the response from method NamespacesClient.DeleteAuthorizationRule. +type NamespacesClientDeleteAuthorizationRuleResponse struct { + // placeholder for future response values +} + +// NamespacesClientDeleteResponse contains the response from method NamespacesClient.BeginDelete. +type NamespacesClientDeleteResponse struct { + // placeholder for future response values +} + +// NamespacesClientGetAuthorizationRuleResponse contains the response from method NamespacesClient.GetAuthorizationRule. +type NamespacesClientGetAuthorizationRuleResponse struct { + // Single item in a List or Get AuthorizationRule operation + AuthorizationRule +} + +// NamespacesClientGetNetworkRuleSetResponse contains the response from method NamespacesClient.GetNetworkRuleSet. +type NamespacesClientGetNetworkRuleSetResponse struct { + // Description of topic resource. + NetworkRuleSet +} + +// NamespacesClientGetResponse contains the response from method NamespacesClient.Get. +type NamespacesClientGetResponse struct { + // Single Namespace item in List or Get Operation + EHNamespace +} + +// NamespacesClientListAuthorizationRulesResponse contains the response from method NamespacesClient.NewListAuthorizationRulesPager. +type NamespacesClientListAuthorizationRulesResponse struct { + // The response from the List namespace operation. + AuthorizationRuleListResult +} + +// NamespacesClientListByResourceGroupResponse contains the response from method NamespacesClient.NewListByResourceGroupPager. 
+type NamespacesClientListByResourceGroupResponse struct { + // The response of the List Namespace operation + EHNamespaceListResult +} + +// NamespacesClientListKeysResponse contains the response from method NamespacesClient.ListKeys. +type NamespacesClientListKeysResponse struct { + // Namespace/EventHub Connection String + AccessKeys +} + +// NamespacesClientListNetworkRuleSetResponse contains the response from method NamespacesClient.ListNetworkRuleSet. +type NamespacesClientListNetworkRuleSetResponse struct { + // The response of the List NetworkRuleSet operation + NetworkRuleSetListResult +} + +// NamespacesClientListResponse contains the response from method NamespacesClient.NewListPager. +type NamespacesClientListResponse struct { + // The response of the List Namespace operation + EHNamespaceListResult +} + +// NamespacesClientRegenerateKeysResponse contains the response from method NamespacesClient.RegenerateKeys. +type NamespacesClientRegenerateKeysResponse struct { + // Namespace/EventHub Connection String + AccessKeys +} + +// NamespacesClientUpdateResponse contains the response from method NamespacesClient.Update. +type NamespacesClientUpdateResponse struct { + // Single Namespace item in List or Get Operation + EHNamespace +} + +// OperationsClientListResponse contains the response from method OperationsClient.NewListPager. +type OperationsClientListResponse struct { + // Result of the request to list Event Hub operations. It contains a list of operations and a URL link to get the next set + // of results. + OperationListResult +} + +// PrivateEndpointConnectionsClientCreateOrUpdateResponse contains the response from method PrivateEndpointConnectionsClient.CreateOrUpdate. +type PrivateEndpointConnectionsClientCreateOrUpdateResponse struct { + // Properties of the PrivateEndpointConnection. + PrivateEndpointConnection +} + +// PrivateEndpointConnectionsClientDeleteResponse contains the response from method PrivateEndpointConnectionsClient.BeginDelete. +type PrivateEndpointConnectionsClientDeleteResponse struct { + // placeholder for future response values +} + +// PrivateEndpointConnectionsClientGetResponse contains the response from method PrivateEndpointConnectionsClient.Get. +type PrivateEndpointConnectionsClientGetResponse struct { + // Properties of the PrivateEndpointConnection. + PrivateEndpointConnection +} + +// PrivateEndpointConnectionsClientListResponse contains the response from method PrivateEndpointConnectionsClient.NewListPager. +type PrivateEndpointConnectionsClientListResponse struct { + // Result of the list of all private endpoint connections operation. + PrivateEndpointConnectionListResult +} + +// PrivateLinkResourcesClientGetResponse contains the response from method PrivateLinkResourcesClient.Get. +type PrivateLinkResourcesClientGetResponse struct { + // Result of the List private link resources operation. + PrivateLinkResourcesListResult +} + +// SchemaRegistryClientCreateOrUpdateResponse contains the response from method SchemaRegistryClient.CreateOrUpdate. +type SchemaRegistryClientCreateOrUpdateResponse struct { + // Single item in List or Get Schema Group operation + SchemaGroup +} + +// SchemaRegistryClientDeleteResponse contains the response from method SchemaRegistryClient.Delete. +type SchemaRegistryClientDeleteResponse struct { + // placeholder for future response values +} + +// SchemaRegistryClientGetResponse contains the response from method SchemaRegistryClient.Get. 
+type SchemaRegistryClientGetResponse struct { + // Single item in List or Get Schema Group operation + SchemaGroup +} + +// SchemaRegistryClientListByNamespaceResponse contains the response from method SchemaRegistryClient.NewListByNamespacePager. +type SchemaRegistryClientListByNamespaceResponse struct { + // The result of the List SchemaGroup operation. + SchemaGroupListResult +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/schemaregistry_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/schemaregistry_client.go new file mode 100644 index 00000000000..5657f5713ca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/schemaregistry_client.go @@ -0,0 +1,319 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// SchemaRegistryClient contains the methods for the SchemaRegistry group. +// Don't use this type directly, use NewSchemaRegistryClient() instead. +type SchemaRegistryClient struct { + internal *arm.Client + subscriptionID string +} + +// NewSchemaRegistryClient creates a new instance of SchemaRegistryClient with the specified values. +// - subscriptionID - Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewSchemaRegistryClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SchemaRegistryClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &SchemaRegistryClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// CreateOrUpdate - +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - schemaGroupName - The Schema Group name +// - parameters - Parameters supplied to create an Event Hub resource. +// - options - SchemaRegistryClientCreateOrUpdateOptions contains the optional parameters for the SchemaRegistryClient.CreateOrUpdate +// method. 
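+//
+// A minimal usage sketch (a hedged illustration, not generated code: the resource names
+// and schema settings are hypothetical, and to.Ptr is the helper from
+// github.com/Azure/azure-sdk-for-go/sdk/azcore/to):
+//
+//	params := SchemaGroup{
+//		Properties: &SchemaGroupProperties{
+//			SchemaCompatibility: to.Ptr(SchemaCompatibilityForward),
+//			SchemaType:          to.Ptr(SchemaTypeAvro),
+//		},
+//	}
+//	resp, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myNamespace", "mySchemaGroup", params, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = resp.SchemaGroup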
+func (client *SchemaRegistryClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, parameters SchemaGroup, options *SchemaRegistryClientCreateOrUpdateOptions) (SchemaRegistryClientCreateOrUpdateResponse, error) { + var err error + const operationName = "SchemaRegistryClient.CreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, namespaceName, schemaGroupName, parameters, options) + if err != nil { + return SchemaRegistryClientCreateOrUpdateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SchemaRegistryClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return SchemaRegistryClientCreateOrUpdateResponse{}, err + } + resp, err := client.createOrUpdateHandleResponse(httpResp) + return resp, err +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *SchemaRegistryClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, parameters SchemaGroup, options *SchemaRegistryClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if schemaGroupName == "" { + return nil, errors.New("parameter schemaGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{schemaGroupName}", url.PathEscape(schemaGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *SchemaRegistryClient) createOrUpdateHandleResponse(resp *http.Response) (SchemaRegistryClientCreateOrUpdateResponse, error) { + result := SchemaRegistryClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil { + return SchemaRegistryClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. 
+// - namespaceName - The Namespace name +// - schemaGroupName - The Schema Group name +// - options - SchemaRegistryClientDeleteOptions contains the optional parameters for the SchemaRegistryClient.Delete method. +func (client *SchemaRegistryClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientDeleteOptions) (SchemaRegistryClientDeleteResponse, error) { + var err error + const operationName = "SchemaRegistryClient.Delete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, namespaceName, schemaGroupName, options) + if err != nil { + return SchemaRegistryClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SchemaRegistryClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return SchemaRegistryClientDeleteResponse{}, err + } + return SchemaRegistryClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *SchemaRegistryClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if schemaGroupName == "" { + return nil, errors.New("parameter schemaGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{schemaGroupName}", url.PathEscape(schemaGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - schemaGroupName - The Schema Group name +// - options - SchemaRegistryClientGetOptions contains the optional parameters for the SchemaRegistryClient.Get method. 
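+//
+// A minimal usage sketch (a hedged illustration, not generated code; the resource names
+// are hypothetical placeholders):
+//
+//	resp, err := client.Get(ctx, "myResourceGroup", "myNamespace", "mySchemaGroup", nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = *resp.SchemaGroup.Name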
+func (client *SchemaRegistryClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientGetOptions) (SchemaRegistryClientGetResponse, error) { + var err error + const operationName = "SchemaRegistryClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, namespaceName, schemaGroupName, options) + if err != nil { + return SchemaRegistryClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SchemaRegistryClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return SchemaRegistryClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *SchemaRegistryClient) getCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, schemaGroupName string, options *SchemaRegistryClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups/{schemaGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if schemaGroupName == "" { + return nil, errors.New("parameter schemaGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{schemaGroupName}", url.PathEscape(schemaGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *SchemaRegistryClient) getHandleResponse(resp *http.Response) (SchemaRegistryClientGetResponse, error) { + result := SchemaRegistryClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroup); err != nil { + return SchemaRegistryClientGetResponse{}, err + } + return result, nil +} + +// NewListByNamespacePager - Gets all the Schema Groups in a Namespace. +// +// Generated from API version 2021-11-01 +// - resourceGroupName - Name of the resource group within the azure subscription. +// - namespaceName - The Namespace name +// - options - SchemaRegistryClientListByNamespaceOptions contains the optional parameters for the SchemaRegistryClient.NewListByNamespacePager +// method. 
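+//
+// A minimal iteration sketch (a hedged illustration, not generated code: the More/NextPage
+// loop is the standard azcore/runtime.Pager contract, and the same pattern applies to the
+// other pagers in this package):
+//
+//	pager := client.NewListByNamespacePager("myResourceGroup", "myNamespace", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle the error
+//		}
+//		for _, group := range page.Value {
+//			_ = *group.Name
+//		}
+//	}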
+func (client *SchemaRegistryClient) NewListByNamespacePager(resourceGroupName string, namespaceName string, options *SchemaRegistryClientListByNamespaceOptions) *runtime.Pager[SchemaRegistryClientListByNamespaceResponse] { + return runtime.NewPager(runtime.PagingHandler[SchemaRegistryClientListByNamespaceResponse]{ + More: func(page SchemaRegistryClientListByNamespaceResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SchemaRegistryClientListByNamespaceResponse) (SchemaRegistryClientListByNamespaceResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "SchemaRegistryClient.NewListByNamespacePager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByNamespaceCreateRequest(ctx, resourceGroupName, namespaceName, options) + }, nil) + if err != nil { + return SchemaRegistryClientListByNamespaceResponse{}, err + } + return client.listByNamespaceHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listByNamespaceCreateRequest creates the ListByNamespace request. +func (client *SchemaRegistryClient) listByNamespaceCreateRequest(ctx context.Context, resourceGroupName string, namespaceName string, options *SchemaRegistryClientListByNamespaceOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/schemagroups" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if namespaceName == "" { + return nil, errors.New("parameter namespaceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{namespaceName}", url.PathEscape(namespaceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-11-01") + if options != nil && options.Skip != nil { + reqQP.Set("$skip", strconv.FormatInt(int64(*options.Skip), 10)) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByNamespaceHandleResponse handles the ListByNamespace response. 
+func (client *SchemaRegistryClient) listByNamespaceHandleResponse(resp *http.Response) (SchemaRegistryClientListByNamespaceResponse, error) { + result := SchemaRegistryClientListByNamespaceResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SchemaGroupListResult); err != nil { + return SchemaRegistryClientListByNamespaceResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/time_rfc3339.go new file mode 100644 index 00000000000..d9611dac386 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub/time_rfc3339.go @@ -0,0 +1,86 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time + +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON + if tzOffsetRegex.Match(data) { + layout = dateTimeJSON + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = dateTimeRFC3339(p) + return err +} + +func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*dateTimeRFC3339)(t) +} + +func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux dateTimeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/CHANGELOG.md new file mode 100644 index 00000000000..2eb9f6435d7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/CHANGELOG.md @@ -0,0 +1,154 @@ +# Release History + +## 1.1.0 (2024-02-13) + +### Other Changes +* Upgraded to API service version `7.5` +* Upgraded dependencies + +## 1.1.0-beta.1 (2023-11-08) + +### Other 
Changes +* Upgraded service version to `7.5-preview.1` +* Updated to latest version of `azcore`. +* Enabled spans for distributed tracing. + +## 1.0.1 (2023-08-22) + +### Other Changes +* Upgraded dependencies + +## 1.0.0 (2023-07-17) + +### Features Added +* first stable release of `azsecrets` module + +### Breaking Changes +* changed type of `KID` from string to type `ID` + +## 0.14.0 (2023-06-08) + +### Breaking Changes +* Renamed `Client.ListSecrets` to `Client.ListSecretProperties` +* Renamed `Client.ListSecretVersions` to `Client.ListSecretPropertiesVersions` +* Renamed `SecretBundle` to `Secret` +* Renamed `DeletedSecretBundle` to `DeletedSecret` +* Renamed `SecretItem` to `SecretProperties` +* Renamed `DeletedSecretItem` to `DeletedSecretProperties` +* Renamed `Kid` to `KID` +* Removed `DeletionRecoveryLevel` type +* Remove `MaxResults` option + +### Other Changes +* Updated dependencies + +## 0.13.0 (2023-04-13) + +### Breaking Changes +* Moved from `sdk/keyvault/azsecrets` to `sdk/security/keyvault/azsecrets` + +## 0.12.0 (2023-04-13) + +### Features Added +* upgraded to api version 7.4 + +## 0.11.0 (2022-11-08) + +### Breaking Changes +* `NewClient` returns an `error` + +## 0.10.1 (2022-09-20) + +### Features Added +* Added `ClientOptions.DisableChallengeResourceVerification`. + See https://aka.ms/azsdk/blog/vault-uri for more information. + +## 0.10.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. + +## 0.9.0 (2022-08-09) + +### Breaking Changes +* Changed type of `NewClient` options parameter to `azsecrets.ClientOptions`, which embeds + the former type, `azcore.ClientOptions` + +## 0.8.0 (2022-07-07) + +### Breaking Changes +* The `Client` API now corresponds more directly to the Key Vault REST API. + Most method signatures and types have changed. See the + [module documentation](https://aka.ms/azsdk/go/keyvault-secrets/docs) + for updated code examples and more details. + +### Other Changes +* Upgrade to latest `azcore` + +## 0.7.1 (2022-05-12) + +### Other Changes +* Updated to latest `azcore` and `internal` modules. 
+
+## 0.7.0 (2022-04-06)
+
+### Features Added
+* Added `PossibleDeletionRecoveryLevelValues` to iterate over all valid `DeletionRecoveryLevel` values
+* Implemented generic pagers from `runtime.Pager` for all List operations
+* Added `Name *string` to `DeletedSecret`, `Properties`, `Secret`, `SecretItem`, and `DeletedSecretItem`
+* Added `Client.VaultURL` to determine the vault URL for debugging
+* Added a `ResumeToken` method to pollers for resuming polling at a later date by using the added `ResumeToken` optional parameter on client polling methods
+
+### Breaking Changes
+* Requires a minimum version of go 1.18
+* Removed `RawResponse` from pollers
+* Removed `DeletionRecoveryLevel`
+* Polling operations return a Poller struct directly instead of a Response envelope
+* Removed `ToPtr` methods
+* `Client.UpdateSecretProperties` takes a `Secret`
+* Renamed `Client.ListSecrets` to `Client.ListPropertiesOfSecrets`
+* Renamed `Client.ListSecretVersions` to `Client.ListPropertiesOfSecretVersions`
+* Renamed `DeletedDate` to `DeletedOn` and `Managed` to `IsManaged`
+* Moved `ContentType`, `Tags`, `KeyID`, and `IsManaged` to `Properties`
+
+## 0.6.0 (2022-03-08)
+
+### Breaking Changes
+* Changes `Attributes` to `Properties`
+* Changes `Secret.KID` to `Secret.KeyID`
+* Changes `DeletedSecretBundle` to `DeletedSecret`
+* Changes `DeletedDate` to `DeletedOn`, `Created` to `CreatedOn`, and `Updated` to `UpdatedOn`
+* Changes the signature of `Client.UpdateSecretProperties` to have all alterable properties in the `UpdateSecretPropertiesOptions` parameter, removing the `parameters Properties` parameter.
+* Changes `Item` to `SecretItem`
+* Pollers and pagers are structs instead of interfaces
+* Prefixed all `DeletionRecoveryLevel` constants with "DeletionRecoveryLevel"
+* Changed pager APIs for `ListSecretVersionsPager`, `ListDeletedSecretsPager`, and `ListSecretsPager`
+ * Use the `More()` method to determine if there are more pages to fetch
+ * Use the `NextPage(context.Context)` to fetch the next page of results
+* Removed all `RawResponse *http.Response` fields from response structs.
+
+## 0.5.0 (2022-02-08)
+
+### Breaking Changes
+* Fixes a bug where `UpdateSecretProperties` will delete properties that are not explicitly set each time. This is only a breaking change at runtime, where the request body will change.
+
+## 0.4.0 (2022-01-11)
+
+### Other Changes
+* Bumps `azcore` dependency from `v0.20.0` to `v0.21.0`
+
+## 0.3.0 (2021-11-09)
+
+### Features Added
+* Clients can now connect to Key Vaults in any cloud
+
+## 0.2.0 (2021-11-02)
+
+### Other Changes
+* Bumps `azcore` dependency to `v0.20.0` and `azidentity` to `v0.12.0`
+
+## 0.1.1 (2021-10-06)
+* Adds the MIT License for redistribution
+
+## 0.1.0 (2021-10-05)
+* This is the initial release of the `azsecrets` library
diff --git a/vendor/github.com/Azure/azure-pipeline-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/LICENSE.txt
similarity index 100%
rename from vendor/github.com/Azure/azure-pipeline-go/LICENSE
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/LICENSE.txt
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/README.md
new file mode 100644
index 00000000000..e34c527bf00
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/README.md
@@ -0,0 +1,144 @@
+# Azure Key Vault Secrets client module for Go
+
+Azure Key Vault helps solve the following problems:
+* Secrets management (this module) - securely store and control access to tokens, passwords, certificates, API keys, and other secrets
+* Managed HSM administration ([azadmin](https://aka.ms/azsdk/go/keyvault-admin/docs)) - role-based access control (RBAC), settings, and vault-level backup and restore options
+* Certificate management ([azcertificates](https://aka.ms/azsdk/go/keyvault-certificates/docs)) - create, manage, and deploy public and private SSL/TLS certificates
+* Cryptographic key management ([azkeys](https://aka.ms/azsdk/go/keyvault-keys/docs)) - create, store, and control access to the keys used to encrypt your data
+
+[Source code][module_source] | [Package (pkg.go.dev)][reference_docs] | [Product documentation][keyvault_docs] | [Samples][secrets_samples]
+
+## Getting started
+
+### Install packages
+
+Install `azsecrets` and `azidentity` with `go get`:
+```
+go get github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets
+go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
+```
+[azidentity][azure_identity] is used for Azure Active Directory authentication as demonstrated below.
+
+
+### Prerequisites
+
+* An [Azure subscription][azure_sub]
+* A supported Go version (the Azure SDK supports the two most recent Go releases)
+* A key vault. If you need to create one, see the Key Vault documentation for instructions on doing so in the [Azure Portal][azure_keyvault_portal] or with the [Azure CLI][azure_keyvault_cli].
+
+### Authentication
+
+This document demonstrates using [azidentity.NewDefaultAzureCredential][default_cred_ref] to authenticate. This credential type works in both local development and production environments. We recommend using a [managed identity][managed_identity] in production.
+
+[Client][client_docs] accepts any [azidentity][azure_identity] credential. See the [azidentity][azure_identity] documentation for more information about other credential types.
+
+#### Create a client
+
+Constructing the client also requires your vault's URL, which you can get from the Azure CLI or the Azure Portal.
+
+```golang
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets"
+)
+
+func main() {
+ cred, err := azidentity.NewDefaultAzureCredential(nil)
+ if err != nil {
+ // TODO: handle error
+ }
+
+ client, err := azsecrets.NewClient("https://<vault-name>.vault.azure.net", cred, nil)
+ if err != nil {
+ // TODO: handle error
+ }
+ _ = client
+}
+```
+
+## Key concepts
+
+### Secret
+
+A secret consists of a secret value and its associated metadata and management information. This library handles secret values as strings, but Azure Key Vault doesn't store them as such. For more information about secrets and how Key Vault stores and manages them, see the [Key Vault documentation](https://docs.microsoft.com/azure/key-vault/general/about-keys-secrets-certificates).
+
+`azsecrets.Client` can set secret values in the vault, update secret metadata, and delete secrets, as shown in the examples below.
+
+## Examples
+
+Get started with our [examples][secrets_samples].
+
+## Troubleshooting
+
+### Error Handling
+
+All methods which send HTTP requests return `*azcore.ResponseError` when these requests fail. `ResponseError` has error details and the raw response from Key Vault.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+
+resp, err := client.GetSecret(context.Background(), "secretName", "", nil)
+if err != nil {
+ var httpErr *azcore.ResponseError
+ if errors.As(err, &httpErr) {
+ // TODO: investigate httpErr
+ } else {
+ // TODO: not an HTTP error
+ }
+}
+```
+
+### Logging
+
+This module uses the logging implementation in `azcore`. To turn on logging for all Azure SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. By default the logger writes to stderr. Use the `azcore/log` package to control log output. For example, logging only HTTP request and response events, and printing them to stdout:
+
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// Print log events to stdout
+azlog.SetListener(func(cls azlog.Event, msg string) {
+ fmt.Println(msg)
+})
+
+// Includes only requests and responses in logs
+azlog.SetEvents(azlog.EventRequest, azlog.EventResponse)
+```
+
+### Accessing `http.Response`
+
+You can access the raw `*http.Response` returned by Key Vault using the `runtime.WithCaptureResponse` method and a context passed to any client method.
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+
+var response *http.Response
+ctx := runtime.WithCaptureResponse(context.TODO(), &response)
+_, err = client.GetSecret(ctx, "secretName", "", nil)
+if err != nil {
+ // TODO: handle error
+}
+// TODO: do something with response
+```
+
+### Additional Documentation
+
+See the [API reference documentation][reference_docs] for complete documentation of this module.
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. 
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact opencode@microsoft.com with any additional questions or comments. + +[azure_identity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity +[azure_keyvault_cli]: https://docs.microsoft.com/azure/key-vault/general/quick-create-cli +[azure_keyvault_portal]: https://docs.microsoft.com/azure/key-vault/general/quick-create-portal +[azure_sub]: https://azure.microsoft.com/free/ +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[default_cred_ref]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity#defaultazurecredential +[keyvault_docs]: https://docs.microsoft.com/azure/key-vault/ +[managed_identity]: https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +[reference_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs +[client_docs]: https://aka.ms/azsdk/go/keyvault-secrets/docs#Client +[module_source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/security/keyvault/azsecrets +[secrets_samples]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets#pkg-examples + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fsecurity%2Fkeyvault%2Fazsecrets%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/TROUBLESHOOTING.md new file mode 100644 index 00000000000..64d2a4f397d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/TROUBLESHOOTING.md @@ -0,0 +1,4 @@ +# Troubleshoot Azure Key Vault Secrets Client Module Issues + +See our [Azure Key Vault SDK Troubleshooting Guide](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/security/keyvault/TROUBLESHOOTING.md) +to troubleshoot issues common to Azure Key Vault client modules. 
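+
+As a quick first check, note that failed requests surface as `*azcore.ResponseError`, so the
+HTTP status code usually narrows down the cause. A minimal sketch (assuming `err` came from
+any client call, with `errors`, `net/http`, and `azcore` imported):
+
+```go
+var respErr *azcore.ResponseError
+if errors.As(err, &respErr) {
+	switch respErr.StatusCode {
+	case http.StatusForbidden: // 403: check RBAC role assignments or access policies
+	case http.StatusNotFound: // 404: the secret (or the vault itself) was not found
+	}
+}
+```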
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/assets.json new file mode 100644 index 00000000000..a6769e311c5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/security/keyvault/azsecrets", + "Tag": "go/security/keyvault/azsecrets_46bc7ae56f" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/autorest.md new file mode 100644 index 00000000000..73ebe3a4eca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/autorest.md @@ -0,0 +1,141 @@ +## Go + +```yaml +clear-output-folder: false +export-clients: true +go: true +input-file: https://github.com/Azure/azure-rest-api-specs/blob/7452e1cc7db72fbc6cd9539b390d8b8e5c2a1864/specification/keyvault/data-plane/Microsoft.KeyVault/stable/7.5/secrets.json +license-header: MICROSOFT_MIT_NO_VERSION +module: github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets +openapi-type: "data-plane" +output-folder: ../azsecrets +override-client-name: Client +security: "AADToken" +security-scopes: "https://vault.azure.net/.default" +use: "@autorest/go@4.0.0-preview.59" +inject-spans: true +version: "^3.0.0" +directive: + # delete unused model + - remove-model: SecretProperties + + # make vault URL a parameter of the client constructor + - from: swagger-document + where: $["x-ms-parameterized-host"] + transform: $.parameters[0]["x-ms-parameter-location"] = "client" + + # rename parameter models to match their methods + - rename-model: + from: SecretRestoreParameters + to: RestoreSecretParameters + - rename-model: + from: SecretSetParameters + to: SetSecretParameters + - rename-model: + from: SecretUpdateParameters + to: UpdateSecretParameters + - rename-model: + from: SecretBundle + to: Secret + - rename-model: + from: DeletedSecretBundle + to: DeletedSecret + - rename-model: + from: SecretItem + to: SecretProperties + - rename-model: + from: DeletedSecretItem + to: DeletedSecretProperties + - rename-model: + from: UpdateSecretParameters + to: UpdateSecretPropertiesParameters + - rename-model: + from: DeletedSecretListResult + to: DeletedSecretPropertiesListResult + - rename-model: + from: SecretListResult + to: SecretPropertiesListResult + + # rename operations + - rename-operation: + from: GetDeletedSecrets + to: ListDeletedSecretProperties + - rename-operation: + from: GetSecrets + to: ListSecretProperties + - rename-operation: + from: GetSecretVersions + to: ListSecretPropertiesVersions + - rename-operation: + from: UpdateSecret + to: UpdateSecretProperties + + # rename fields + - from: swagger-document + where: $.definitions.RestoreSecretParameters.properties.value + transform: $["x-ms-client-name"] = "SecretBackup" + - from: swagger-document + where: $.definitions.Secret.properties.kid + transform: $["x-ms-client-name"] = "KID" + + # remove type DeletionRecoveryLevel, use string instead + - from: models.go + where: $ + transform: return $.replace(/DeletionRecoveryLevel/g, "string"); + + # Remove MaxResults parameter + - where: "$.paths..*" + remove-parameter: + in: query + name: maxresults + + # delete unused error models + - from: models.go + where: $ + transform: return $.replace(/(?:\/\/.*\s)+type 
(?:Error|KeyVaultError).+\{(?:\s.+\s)+\}\s/g, "");
+ - from: models_serde.go
+ where: $
+ transform: return $.replace(/(?:\/\/.*\s)+func \(\w \*?(?:Error|KeyVaultError)\).*\{\s(?:.+\s)+\}\s/g, "");
+
+ # delete the Attributes model defined in common.json (it's used only with allOf)
+ - from: models.go
+ where: $
+ transform: return $.replace(/(?:\/\/.*\s)+type Attributes.+\{(?:\s.+\s)+\}\s/g, "");
+ - from: models_serde.go
+ where: $
+ transform: return $.replace(/(?:\/\/.*\s)+func \(a \*?Attributes\).*\{\s(?:.+\s)+\}\s/g, "");
+
+ # delete the version path param check (version == "" is legal for Key Vault but indescribable by OpenAPI)
+ - from: client.go
+ where: $
+ transform: return $.replace(/\sif secretVersion == "" \{\s+.+secretVersion cannot be empty"\)\s+\}\s/g, "");
+
+ # delete client name prefix from method options and response types
+ - from:
+ - client.go
+ - models.go
+ - options.go
+ - response_types.go
+ where: $
+ transform: return $.replace(/Client(\w+)((?:Options|Response))/g, "$1$2");
+
+ # make secret IDs a convenience type so we can add parsing methods
+ - from: models.go
+ where: $
+ transform: return $.replace(/(\sID \*)string(\s+.*)/g, "$1ID$2")
+ - from: models.go
+ where: $
+ transform: return $.replace(/(\sKID \*)string(\s+.*)/g, "$1ID$2")
+
+ # Maxresults -> MaxResults
+ - from:
+ - client.go
+ - models.go
+ where: $
+ transform: return $.replace(/Maxresults/g, "MaxResults")
+
+ # secretName, secretVersion -> name, version
+ - from: client.go
+ where: $
+ transform: return $.replace(/secretName/g, "name").replace(/secretVersion/g, "version")
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/build.go
new file mode 100644
index 00000000000..95af012822a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/build.go
@@ -0,0 +1,11 @@
+//go:build go1.18
+// +build go1.18
+
+//go:generate autorest ./autorest.md
+//go:generate rm ./constants.go
+//go:generate gofmt -w .
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azsecrets
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/ci.yml
new file mode 100644
index 00000000000..f9ad1f16660
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/ci.yml
@@ -0,0 +1,30 @@
+
+# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azsecrets + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/azsecrets + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'security/keyvault/azsecrets' + RunLiveTests: true + UsePipelineProxy: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/client.go new file mode 100644 index 00000000000..685643ceb32 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/client.go @@ -0,0 +1,677 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// Client contains the methods for the Client group. +// Don't use this type directly, use a constructor function instead. +type Client struct { + internal *azcore.Client + endpoint string +} + +// BackupSecret - Requests that a backup of the specified secret be downloaded to the client. All versions of the secret will +// be downloaded. This operation requires the secrets/backup permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - options - BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. +func (client *Client) BackupSecret(ctx context.Context, name string, options *BackupSecretOptions) (BackupSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.BackupSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.backupSecretCreateRequest(ctx, name, options) + if err != nil { + return BackupSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BackupSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BackupSecretResponse{}, err + } + resp, err := client.backupSecretHandleResponse(httpResp) + return resp, err +} + +// backupSecretCreateRequest creates the BackupSecret request. 
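+//
+// A minimal usage sketch for Client.BackupSecret above (a hedged illustration, not
+// generated code; the secret name is a hypothetical placeholder and the returned Value
+// is an opaque backup blob suitable for Client.RestoreSecret):
+//
+//	resp, err := client.BackupSecret(ctx, "mySecret", nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	backup := resp.Value
+//	_ = backup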
+func (client *Client) backupSecretCreateRequest(ctx context.Context, name string, options *BackupSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/backup" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// backupSecretHandleResponse handles the BackupSecret response. +func (client *Client) backupSecretHandleResponse(resp *http.Response) (BackupSecretResponse, error) { + result := BackupSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.BackupSecretResult); err != nil { + return BackupSecretResponse{}, err + } + return result, nil +} + +// DeleteSecret - The DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an individual +// version of a secret. This operation requires the secrets/delete permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - options - DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +func (client *Client) DeleteSecret(ctx context.Context, name string, options *DeleteSecretOptions) (DeleteSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.DeleteSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteSecretCreateRequest(ctx, name, options) + if err != nil { + return DeleteSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return DeleteSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return DeleteSecretResponse{}, err + } + resp, err := client.deleteSecretHandleResponse(httpResp) + return resp, err +} + +// deleteSecretCreateRequest creates the DeleteSecret request. +func (client *Client) deleteSecretCreateRequest(ctx context.Context, name string, options *DeleteSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// deleteSecretHandleResponse handles the DeleteSecret response. +func (client *Client) deleteSecretHandleResponse(resp *http.Response) (DeleteSecretResponse, error) { + result := DeleteSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecret); err != nil { + return DeleteSecretResponse{}, err + } + return result, nil +} + +// GetDeletedSecret - The Get Deleted Secret operation returns the specified deleted secret along with its attributes. This +// operation requires the secrets/get permission. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - options - GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +func (client *Client) GetDeletedSecret(ctx context.Context, name string, options *GetDeletedSecretOptions) (GetDeletedSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetDeletedSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return GetDeletedSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetDeletedSecretResponse{}, err + } + resp, err := client.getDeletedSecretHandleResponse(httpResp) + return resp, err +} + +// getDeletedSecretCreateRequest creates the GetDeletedSecret request. +func (client *Client) getDeletedSecretCreateRequest(ctx context.Context, name string, options *GetDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getDeletedSecretHandleResponse handles the GetDeletedSecret response. +func (client *Client) getDeletedSecretHandleResponse(resp *http.Response) (GetDeletedSecretResponse, error) { + result := GetDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecret); err != nil { + return GetDeletedSecretResponse{}, err + } + return result, nil +} + +// GetSecret - The GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the secrets/get +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - version - The version of the secret. This URI fragment is optional. If not specified, the latest version of the secret +// is returned. +// - options - GetSecretOptions contains the optional parameters for the Client.GetSecret method. +func (client *Client) GetSecret(ctx context.Context, name string, version string, options *GetSecretOptions) (GetSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.GetSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getSecretCreateRequest(ctx, name, version, options) + if err != nil { + return GetSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GetSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return GetSecretResponse{}, err + } + resp, err := client.getSecretHandleResponse(httpResp) + return resp, err +} + +// getSecretCreateRequest creates the GetSecret request. 
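+//
+// The request built here backs the exported GetSecret method above. A
+// hypothetical caller-side sketch (assumed names; error handling elided):
+//
+//	resp, err := client.GetSecret(context.TODO(), "app-secret", "", nil)
+//	if err == nil && resp.Value != nil {
+//		_ = *resp.Value // version "" fetched the latest version of the secret
+//	}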
+func (client *Client) getSecretCreateRequest(ctx context.Context, name string, version string, options *GetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getSecretHandleResponse handles the GetSecret response. +func (client *Client) getSecretHandleResponse(resp *http.Response) (GetSecretResponse, error) { + result := GetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Secret); err != nil { + return GetSecretResponse{}, err + } + return result, nil +} + +// NewListDeletedSecretPropertiesPager - The Get Deleted Secrets operation returns the secrets that have been deleted for +// a vault enabled for soft-delete. This operation requires the secrets/list permission. +// +// Generated from API version 7.5 +// - options - ListDeletedSecretPropertiesOptions contains the optional parameters for the Client.NewListDeletedSecretPropertiesPager +// method. +func (client *Client) NewListDeletedSecretPropertiesPager(options *ListDeletedSecretPropertiesOptions) *runtime.Pager[ListDeletedSecretPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListDeletedSecretPropertiesResponse]{ + More: func(page ListDeletedSecretPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListDeletedSecretPropertiesResponse) (ListDeletedSecretPropertiesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listDeletedSecretPropertiesCreateRequest(ctx, options) + }, nil) + if err != nil { + return ListDeletedSecretPropertiesResponse{}, err + } + return client.listDeletedSecretPropertiesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listDeletedSecretPropertiesCreateRequest creates the ListDeletedSecretProperties request. +func (client *Client) listDeletedSecretPropertiesCreateRequest(ctx context.Context, options *ListDeletedSecretPropertiesOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listDeletedSecretPropertiesHandleResponse handles the ListDeletedSecretProperties response. 
+func (client *Client) listDeletedSecretPropertiesHandleResponse(resp *http.Response) (ListDeletedSecretPropertiesResponse, error) { + result := ListDeletedSecretPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DeletedSecretPropertiesListResult); err != nil { + return ListDeletedSecretPropertiesResponse{}, err + } + return result, nil +} + +// NewListSecretPropertiesPager - The Get Secrets operation is applicable to the entire vault. However, only the base secret +// identifier and its attributes are provided in the response. Individual secret versions are not listed in the +// response. This operation requires the secrets/list permission. +// +// Generated from API version 7.5 +// - options - ListSecretPropertiesOptions contains the optional parameters for the Client.NewListSecretPropertiesPager +// method. +func (client *Client) NewListSecretPropertiesPager(options *ListSecretPropertiesOptions) *runtime.Pager[ListSecretPropertiesResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretPropertiesResponse]{ + More: func(page ListSecretPropertiesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretPropertiesResponse) (ListSecretPropertiesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listSecretPropertiesCreateRequest(ctx, options) + }, nil) + if err != nil { + return ListSecretPropertiesResponse{}, err + } + return client.listSecretPropertiesHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listSecretPropertiesCreateRequest creates the ListSecretProperties request. +func (client *Client) listSecretPropertiesCreateRequest(ctx context.Context, options *ListSecretPropertiesOptions) (*policy.Request, error) { + urlPath := "/secrets" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretPropertiesHandleResponse handles the ListSecretProperties response. +func (client *Client) listSecretPropertiesHandleResponse(resp *http.Response) (ListSecretPropertiesResponse, error) { + result := ListSecretPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretPropertiesListResult); err != nil { + return ListSecretPropertiesResponse{}, err + } + return result, nil +} + +// NewListSecretPropertiesVersionsPager - The full secret identifier and attributes are provided in the response. No values +// are returned for the secrets. This operations requires the secrets/list permission. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - options - ListSecretPropertiesVersionsOptions contains the optional parameters for the Client.NewListSecretPropertiesVersionsPager +// method. 
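+//
+// A hypothetical iteration sketch (assumed names; error handling elided),
+// following the More/NextPage protocol of runtime.Pager:
+//
+//	pager := client.NewListSecretPropertiesVersionsPager("app-secret", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(context.TODO())
+//		if err != nil {
+//			break // a real caller should handle the error
+//		}
+//		for _, props := range page.Value {
+//			_ = props.ID // metadata only; no secret values are returned
+//		}
+//	}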
+func (client *Client) NewListSecretPropertiesVersionsPager(name string, options *ListSecretPropertiesVersionsOptions) *runtime.Pager[ListSecretPropertiesVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[ListSecretPropertiesVersionsResponse]{ + More: func(page ListSecretPropertiesVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSecretPropertiesVersionsResponse) (ListSecretPropertiesVersionsResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listSecretPropertiesVersionsCreateRequest(ctx, name, options) + }, nil) + if err != nil { + return ListSecretPropertiesVersionsResponse{}, err + } + return client.listSecretPropertiesVersionsHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listSecretPropertiesVersionsCreateRequest creates the ListSecretPropertiesVersions request. +func (client *Client) listSecretPropertiesVersionsCreateRequest(ctx context.Context, name string, options *ListSecretPropertiesVersionsOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/versions" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecretPropertiesVersionsHandleResponse handles the ListSecretPropertiesVersions response. +func (client *Client) listSecretPropertiesVersionsHandleResponse(resp *http.Response) (ListSecretPropertiesVersionsResponse, error) { + result := ListSecretPropertiesVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecretPropertiesListResult); err != nil { + return ListSecretPropertiesVersionsResponse{}, err + } + return result, nil +} + +// PurgeDeletedSecret - The purge deleted secret operation removes the secret permanently, without the possibility of recovery. +// This operation can only be enabled on a soft-delete enabled vault. This operation requires the +// secrets/purge permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - options - PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. 
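+//
+// A hypothetical caller-side sketch (assumed names): the success response has
+// no body (HTTP 204), so only the returned error is of interest:
+//
+//	_, err := client.PurgeDeletedSecret(context.TODO(), "app-secret", nil)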
+func (client *Client) PurgeDeletedSecret(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (PurgeDeletedSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.PurgeDeletedSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.purgeDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PurgeDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return PurgeDeletedSecretResponse{}, err + } + return PurgeDeletedSecretResponse{}, nil +} + +// purgeDeletedSecretCreateRequest creates the PurgeDeletedSecret request. +func (client *Client) purgeDeletedSecretCreateRequest(ctx context.Context, name string, options *PurgeDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RecoverDeletedSecret - Recovers the deleted secret in the specified vault. This operation can only be performed on a soft-delete +// enabled vault. This operation requires the secrets/recover permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the deleted secret. +// - options - RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +func (client *Client) RecoverDeletedSecret(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (RecoverDeletedSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.RecoverDeletedSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.recoverDeletedSecretCreateRequest(ctx, name, options) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RecoverDeletedSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return RecoverDeletedSecretResponse{}, err + } + resp, err := client.recoverDeletedSecretHandleResponse(httpResp) + return resp, err +} + +// recoverDeletedSecretCreateRequest creates the RecoverDeletedSecret request. 
+func (client *Client) recoverDeletedSecretCreateRequest(ctx context.Context, name string, options *RecoverDeletedSecretOptions) (*policy.Request, error) { + urlPath := "/deletedsecrets/{secret-name}/recover" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// recoverDeletedSecretHandleResponse handles the RecoverDeletedSecret response. +func (client *Client) recoverDeletedSecretHandleResponse(resp *http.Response) (RecoverDeletedSecretResponse, error) { + result := RecoverDeletedSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Secret); err != nil { + return RecoverDeletedSecretResponse{}, err + } + return result, nil +} + +// RestoreSecret - Restores a backed up secret, and all its versions, to a vault. This operation requires the secrets/restore +// permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - parameters - The parameters to restore the secret. +// - options - RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. +func (client *Client) RestoreSecret(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (RestoreSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.RestoreSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.restoreSecretCreateRequest(ctx, parameters, options) + if err != nil { + return RestoreSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return RestoreSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return RestoreSecretResponse{}, err + } + resp, err := client.restoreSecretHandleResponse(httpResp) + return resp, err +} + +// restoreSecretCreateRequest creates the RestoreSecret request. +func (client *Client) restoreSecretCreateRequest(ctx context.Context, parameters RestoreSecretParameters, options *RestoreSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/restore" + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// restoreSecretHandleResponse handles the RestoreSecret response. +func (client *Client) restoreSecretHandleResponse(resp *http.Response) (RestoreSecretResponse, error) { + result := RestoreSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Secret); err != nil { + return RestoreSecretResponse{}, err + } + return result, nil +} + +// SetSecret - The SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key Vault +// creates a new version of that secret. This operation requires the secrets/set permission. 
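+// (A hypothetical caller-side sketch with assumed names, shown without the
+// azsecrets package qualifier; to.Ptr is the generic pointer helper from
+// sdk/azcore/to, and error handling is elided:
+//
+//	params := SetSecretParameters{Value: to.Ptr("s3cr3t")}
+//	resp, err := client.SetSecret(context.TODO(), "app-secret", params, nil)
+//
+// A second call with the same name would create a new version.)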
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. The value you provide may be copied globally for the purpose of running the service. +// The value provided should not include personally identifiable or sensitive information. +// - parameters - The parameters for setting the secret. +// - options - SetSecretOptions contains the optional parameters for the Client.SetSecret method. +func (client *Client) SetSecret(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (SetSecretResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.SetSecret", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.setSecretCreateRequest(ctx, name, parameters, options) + if err != nil { + return SetSecretResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return SetSecretResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return SetSecretResponse{}, err + } + resp, err := client.setSecretHandleResponse(httpResp) + return resp, err +} + +// setSecretCreateRequest creates the SetSecret request. +func (client *Client) setSecretCreateRequest(ctx context.Context, name string, parameters SetSecretParameters, options *SetSecretOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// setSecretHandleResponse handles the SetSecret response. +func (client *Client) setSecretHandleResponse(resp *http.Response) (SetSecretResponse, error) { + result := SetSecretResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Secret); err != nil { + return SetSecretResponse{}, err + } + return result, nil +} + +// UpdateSecretProperties - The UPDATE operation changes specified attributes of an existing stored secret. Attributes that +// are not specified in the request are left unchanged. The value of a secret itself cannot be changed. +// This operation requires the secrets/set permission. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 7.5 +// - name - The name of the secret. +// - version - The version of the secret. +// - parameters - The parameters for update secret operation. +// - options - UpdateSecretPropertiesOptions contains the optional parameters for the Client.UpdateSecretProperties method. 
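+//
+// A hypothetical caller-side sketch (assumed names; error handling elided)
+// that disables a secret without touching its value; per the autorest.md note
+// above, passing version "" targets the latest version:
+//
+//	enabled := false
+//	params := UpdateSecretPropertiesParameters{
+//		SecretAttributes: &SecretAttributes{Enabled: &enabled},
+//	}
+//	resp, err := client.UpdateSecretProperties(context.TODO(), "app-secret", "", params, nil)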
+func (client *Client) UpdateSecretProperties(ctx context.Context, name string, version string, parameters UpdateSecretPropertiesParameters, options *UpdateSecretPropertiesOptions) (UpdateSecretPropertiesResponse, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, "Client.UpdateSecretProperties", client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateSecretPropertiesCreateRequest(ctx, name, version, parameters, options) + if err != nil { + return UpdateSecretPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return UpdateSecretPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return UpdateSecretPropertiesResponse{}, err + } + resp, err := client.updateSecretPropertiesHandleResponse(httpResp) + return resp, err +} + +// updateSecretPropertiesCreateRequest creates the UpdateSecretProperties request. +func (client *Client) updateSecretPropertiesCreateRequest(ctx context.Context, name string, version string, parameters UpdateSecretPropertiesParameters, options *UpdateSecretPropertiesOptions) (*policy.Request, error) { + urlPath := "/secrets/{secret-name}/{secret-version}" + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{secret-name}", url.PathEscape(name)) + urlPath = strings.ReplaceAll(urlPath, "{secret-version}", url.PathEscape(version)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.endpoint, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "7.5") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, parameters); err != nil { + return nil, err + } + return req, nil +} + +// updateSecretPropertiesHandleResponse handles the UpdateSecretProperties response. +func (client *Client) updateSecretPropertiesHandleResponse(resp *http.Response) (UpdateSecretPropertiesResponse, error) { + result := UpdateSecretPropertiesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Secret); err != nil { + return UpdateSecretPropertiesResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/custom_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/custom_client.go new file mode 100644 index 00000000000..a4070bda3a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/custom_client.go @@ -0,0 +1,68 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azsecrets + +// this file contains handwritten additions to the generated code + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal" +) + +// ClientOptions contains optional settings for Client. +type ClientOptions struct { + azcore.ClientOptions + + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain. 
+ // See https://aka.ms/azsdk/blog/vault-uri for more information. + DisableChallengeResourceVerification bool +} + +// NewClient creates a client that accesses a Key Vault's secrets. You should validate that +// vaultURL references a valid Key Vault. See https://aka.ms/azsdk/blog/vault-uri for details. +func NewClient(vaultURL string, credential azcore.TokenCredential, options *ClientOptions) (*Client, error) { + if options == nil { + options = &ClientOptions{} + } + authPolicy := internal.NewKeyVaultChallengePolicy( + credential, + &internal.KeyVaultChallengePolicyOptions{ + DisableChallengeResourceVerification: options.DisableChallengeResourceVerification, + }, + ) + azcoreClient, err := azcore.NewClient(moduleName, version, runtime.PipelineOptions{ + PerRetry: []policy.Policy{authPolicy}, + Tracing: runtime.TracingOptions{ + Namespace: "Microsoft.KeyVault", + }, + }, &options.ClientOptions) + if err != nil { + return nil, err + } + return &Client{endpoint: vaultURL, internal: azcoreClient}, nil +} + +// ID is a secret's unique ID, containing its name and version. +type ID string + +// Name of the secret. +func (i *ID) Name() string { + _, name, _ := internal.ParseID((*string)(i)) + return *name +} + +// Version of the secret. This returns an empty string when the ID contains no version. +func (i *ID) Version() string { + _, _, version := internal.ParseID((*string)(i)) + if version == nil { + return "" + } + return *version +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models.go new file mode 100644 index 00000000000..aff7c87e3f3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models.go @@ -0,0 +1,204 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +import "time" + +// BackupSecretResult - The backup secret result, containing the backup blob. +type BackupSecretResult struct { + // READ-ONLY; The backup blob containing the backed up secret. + Value []byte +} + +// DeletedSecret - A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when +// it will be purged. +type DeletedSecret struct { + // The secret management attributes. + Attributes *SecretAttributes + + // The content type of the secret. + ContentType *string + + // The secret id. + ID *ID + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // The secret value. + Value *string + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + KID *ID + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. 
+ Managed *bool + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time +} + +// DeletedSecretProperties - The deleted secret item containing metadata about the deleted secret. +type DeletedSecretProperties struct { + // The secret management attributes. + Attributes *SecretAttributes + + // Type of the secret value such as a password. + ContentType *string + + // Secret identifier. + ID *ID + + // The url of the recovery object, used to identify and recover the deleted secret. + RecoveryID *string + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // READ-ONLY; The time when the secret was deleted, in UTC + DeletedDate *time.Time + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. + Managed *bool + + // READ-ONLY; The time when the secret is scheduled to be purged, in UTC + ScheduledPurgeDate *time.Time +} + +// DeletedSecretPropertiesListResult - The deleted secret list result +type DeletedSecretPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of deleted secrets. + NextLink *string + + // READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page + // of deleted secrets + Value []*DeletedSecretProperties +} + +// RestoreSecretParameters - The secret restore parameters. +type RestoreSecretParameters struct { + // REQUIRED; The backup blob associated with a secret bundle. + SecretBackup []byte +} + +// Secret - A secret consisting of a value, id and its attributes. +type Secret struct { + // The secret management attributes. + Attributes *SecretAttributes + + // The content type of the secret. + ContentType *string + + // The secret id. + ID *ID + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string + + // The secret value. + Value *string + + // READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV + // certificate. + KID *ID + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed + // will be true. + Managed *bool +} + +// SecretAttributes - The secret management attributes. +type SecretAttributes struct { + // Determines whether the object is enabled. + Enabled *bool + + // Expiry date in UTC. + Expires *time.Time + + // Not before date in UTC. + NotBefore *time.Time + + // READ-ONLY; Creation time in UTC. + Created *time.Time + + // READ-ONLY; softDelete data retention days. Value should be >=7 and <=90 when softDelete enabled, otherwise 0. + RecoverableDays *int32 + + // READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', + // the secret can be permanently deleted by a privileged user; otherwise, only the + // system can purge the secret, at the end of the retention interval. + RecoveryLevel *string + + // READ-ONLY; Last updated time in UTC. + Updated *time.Time +} + +// SecretProperties - The secret item containing secret metadata. +type SecretProperties struct { + // The secret management attributes. + Attributes *SecretAttributes + + // Type of the secret value such as a password. + ContentType *string + + // Secret identifier. + ID *ID + + // Application specific metadata in the form of key-value pairs. 
+ Tags map[string]*string + + // READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed + // will be true. + Managed *bool +} + +// SecretPropertiesListResult - The secret list result. +type SecretPropertiesListResult struct { + // READ-ONLY; The URL to get the next set of secrets. + NextLink *string + + // READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. + Value []*SecretProperties +} + +// SetSecretParameters - The secret set parameters. +type SetSecretParameters struct { + // REQUIRED; The value of the secret. + Value *string + + // Type of the secret value such as a password. + ContentType *string + + // The secret management attributes. + SecretAttributes *SecretAttributes + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string +} + +// UpdateSecretPropertiesParameters - The secret update parameters. +type UpdateSecretPropertiesParameters struct { + // Type of the secret value such as a password. + ContentType *string + + // The secret management attributes. + SecretAttributes *SecretAttributes + + // Application specific metadata in the form of key-value pairs. + Tags map[string]*string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models_serde.go new file mode 100644 index 00000000000..077c2a1ee1e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/models_serde.go @@ -0,0 +1,500 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type BackupSecretResult. +func (b BackupSecretResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", b.Value, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type BackupSecretResult. +func (b *BackupSecretResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &b.Value, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", b, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecret. 
+func (d DeletedSecret) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "kid", d.KID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecret. +func (d *DeletedSecret) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &d.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretProperties. +func (d DeletedSecretProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", d.Attributes) + populate(objectMap, "contentType", d.ContentType) + populateTimeUnix(objectMap, "deletedDate", d.DeletedDate) + populate(objectMap, "id", d.ID) + populate(objectMap, "managed", d.Managed) + populate(objectMap, "recoveryId", d.RecoveryID) + populateTimeUnix(objectMap, "scheduledPurgeDate", d.ScheduledPurgeDate) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretProperties. 
+func (d *DeletedSecretProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &d.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &d.ContentType) + delete(rawMsg, key) + case "deletedDate": + err = unpopulateTimeUnix(val, "DeletedDate", &d.DeletedDate) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &d.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &d.Managed) + delete(rawMsg, key) + case "recoveryId": + err = unpopulate(val, "RecoveryID", &d.RecoveryID) + delete(rawMsg, key) + case "scheduledPurgeDate": + err = unpopulateTimeUnix(val, "ScheduledPurgeDate", &d.ScheduledPurgeDate) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &d.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DeletedSecretPropertiesListResult. +func (d DeletedSecretPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", d.NextLink) + populate(objectMap, "value", d.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DeletedSecretPropertiesListResult. +func (d *DeletedSecretPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &d.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &d.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RestoreSecretParameters. +func (r RestoreSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateByteArray(objectMap, "value", r.SecretBackup, runtime.Base64URLFormat) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RestoreSecretParameters. +func (r *RestoreSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "value": + err = runtime.DecodeByteArray(string(val), &r.SecretBackup, runtime.Base64URLFormat) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Secret. 
+func (s Secret) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "kid", s.KID) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Secret. +func (s *Secret) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "kid": + err = unpopulate(val, "KID", &s.KID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretAttributes. +func (s SecretAttributes) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeUnix(objectMap, "created", s.Created) + populate(objectMap, "enabled", s.Enabled) + populateTimeUnix(objectMap, "exp", s.Expires) + populateTimeUnix(objectMap, "nbf", s.NotBefore) + populate(objectMap, "recoverableDays", s.RecoverableDays) + populate(objectMap, "recoveryLevel", s.RecoveryLevel) + populateTimeUnix(objectMap, "updated", s.Updated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretAttributes. +func (s *SecretAttributes) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "created": + err = unpopulateTimeUnix(val, "Created", &s.Created) + delete(rawMsg, key) + case "enabled": + err = unpopulate(val, "Enabled", &s.Enabled) + delete(rawMsg, key) + case "exp": + err = unpopulateTimeUnix(val, "Expires", &s.Expires) + delete(rawMsg, key) + case "nbf": + err = unpopulateTimeUnix(val, "NotBefore", &s.NotBefore) + delete(rawMsg, key) + case "recoverableDays": + err = unpopulate(val, "RecoverableDays", &s.RecoverableDays) + delete(rawMsg, key) + case "recoveryLevel": + err = unpopulate(val, "RecoveryLevel", &s.RecoveryLevel) + delete(rawMsg, key) + case "updated": + err = unpopulateTimeUnix(val, "Updated", &s.Updated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretProperties. 
+func (s SecretProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "attributes", s.Attributes) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "id", s.ID) + populate(objectMap, "managed", s.Managed) + populate(objectMap, "tags", s.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretProperties. +func (s *SecretProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "attributes": + err = unpopulate(val, "Attributes", &s.Attributes) + delete(rawMsg, key) + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "managed": + err = unpopulate(val, "Managed", &s.Managed) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecretPropertiesListResult. +func (s SecretPropertiesListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecretPropertiesListResult. +func (s *SecretPropertiesListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SetSecretParameters. +func (s SetSecretParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", s.ContentType) + populate(objectMap, "attributes", s.SecretAttributes) + populate(objectMap, "tags", s.Tags) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SetSecretParameters. 
+func (s *SetSecretParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &s.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &s.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &s.Tags) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type UpdateSecretPropertiesParameters. +func (u UpdateSecretPropertiesParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "contentType", u.ContentType) + populate(objectMap, "attributes", u.SecretAttributes) + populate(objectMap, "tags", u.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type UpdateSecretPropertiesParameters. +func (u *UpdateSecretPropertiesParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "contentType": + err = unpopulate(val, "ContentType", &u.ContentType) + delete(rawMsg, key) + case "attributes": + err = unpopulate(val, "SecretAttributes", &u.SecretAttributes) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &u.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", u, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateByteArray(m map[string]any, k string, b []byte, f runtime.Base64Encoding) { + if azcore.IsNullValue(b) { + m[k] = nil + } else if len(b) == 0 { + return + } else { + m[k] = runtime.EncodeByteArray(b, f) + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/options.go new file mode 100644 index 00000000000..6d35566a505 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/options.go @@ -0,0 +1,71 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +// BackupSecretOptions contains the optional parameters for the Client.BackupSecret method. 
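+// All of the option types in this file are currently empty placeholders, so
+// callers conventionally pass nil, e.g. client.BackupSecret(ctx, name, nil);
+// the distinct types exist so optional parameters can be added later without
+// breaking method signatures.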
+type BackupSecretOptions struct { + // placeholder for future optional parameters +} + +// DeleteSecretOptions contains the optional parameters for the Client.DeleteSecret method. +type DeleteSecretOptions struct { + // placeholder for future optional parameters +} + +// GetDeletedSecretOptions contains the optional parameters for the Client.GetDeletedSecret method. +type GetDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// GetSecretOptions contains the optional parameters for the Client.GetSecret method. +type GetSecretOptions struct { + // placeholder for future optional parameters +} + +// ListDeletedSecretPropertiesOptions contains the optional parameters for the Client.NewListDeletedSecretPropertiesPager +// method. +type ListDeletedSecretPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListSecretPropertiesOptions contains the optional parameters for the Client.NewListSecretPropertiesPager method. +type ListSecretPropertiesOptions struct { + // placeholder for future optional parameters +} + +// ListSecretPropertiesVersionsOptions contains the optional parameters for the Client.NewListSecretPropertiesVersionsPager +// method. +type ListSecretPropertiesVersionsOptions struct { + // placeholder for future optional parameters +} + +// PurgeDeletedSecretOptions contains the optional parameters for the Client.PurgeDeletedSecret method. +type PurgeDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RecoverDeletedSecretOptions contains the optional parameters for the Client.RecoverDeletedSecret method. +type RecoverDeletedSecretOptions struct { + // placeholder for future optional parameters +} + +// RestoreSecretOptions contains the optional parameters for the Client.RestoreSecret method. +type RestoreSecretOptions struct { + // placeholder for future optional parameters +} + +// SetSecretOptions contains the optional parameters for the Client.SetSecret method. +type SetSecretOptions struct { + // placeholder for future optional parameters +} + +// UpdateSecretPropertiesOptions contains the optional parameters for the Client.UpdateSecretProperties method. +type UpdateSecretPropertiesOptions struct { + // placeholder for future optional parameters +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/response_types.go new file mode 100644 index 00000000000..43f00000a23 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/response_types.go @@ -0,0 +1,80 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +// BackupSecretResponse contains the response from method Client.BackupSecret. +type BackupSecretResponse struct { + // The backup secret result, containing the backup blob. + BackupSecretResult +} + +// DeleteSecretResponse contains the response from method Client.DeleteSecret. +type DeleteSecretResponse struct { + // A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged. 
+ DeletedSecret +} + +// GetDeletedSecretResponse contains the response from method Client.GetDeletedSecret. +type GetDeletedSecretResponse struct { + // A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged. + DeletedSecret +} + +// GetSecretResponse contains the response from method Client.GetSecret. +type GetSecretResponse struct { + // A secret consisting of a value, id and its attributes. + Secret +} + +// ListDeletedSecretPropertiesResponse contains the response from method Client.NewListDeletedSecretPropertiesPager. +type ListDeletedSecretPropertiesResponse struct { + // The deleted secret list result + DeletedSecretPropertiesListResult +} + +// ListSecretPropertiesResponse contains the response from method Client.NewListSecretPropertiesPager. +type ListSecretPropertiesResponse struct { + // The secret list result. + SecretPropertiesListResult +} + +// ListSecretPropertiesVersionsResponse contains the response from method Client.NewListSecretPropertiesVersionsPager. +type ListSecretPropertiesVersionsResponse struct { + // The secret list result. + SecretPropertiesListResult +} + +// PurgeDeletedSecretResponse contains the response from method Client.PurgeDeletedSecret. +type PurgeDeletedSecretResponse struct { + // placeholder for future response values +} + +// RecoverDeletedSecretResponse contains the response from method Client.RecoverDeletedSecret. +type RecoverDeletedSecretResponse struct { + // A secret consisting of a value, id and its attributes. + Secret +} + +// RestoreSecretResponse contains the response from method Client.RestoreSecret. +type RestoreSecretResponse struct { + // A secret consisting of a value, id and its attributes. + Secret +} + +// SetSecretResponse contains the response from method Client.SetSecret. +type SetSecretResponse struct { + // A secret consisting of a value, id and its attributes. + Secret +} + +// UpdateSecretPropertiesResponse contains the response from method Client.UpdateSecretProperties. +type UpdateSecretPropertiesResponse struct { + // A secret consisting of a value, id and its attributes. + Secret +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/test-resources.json new file mode 100644 index 00000000000..20f726f3322 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/test-resources.json @@ -0,0 +1,331 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "string", + "defaultValue": "[resourceGroup().name]", + "metadata": { + "description": "The base resource name." + } + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The client OID to grant access to test resources." + } + }, + "provisionerApplicationOid": { + "type": "string", + "metadata": { + "description": "The provisioner OID to grant access to test resources." + } + }, + "location": { + "type": "string", + "defaultValue": "[resourceGroup().location]", + "metadata": { + "description": "The location of the resource. By default, this is the same as the resource group." 
+ } + }, + "hsmLocation": { + "type": "string", + "defaultValue": "southcentralus", + "allowedValues": [ + "australiacentral", + "canadacentral", + "centralus", + "eastasia", + "eastus2", + "koreacentral", + "northeurope", + "southafricanorth", + "southcentralus", + "southeastasia", + "switzerlandnorth", + "uksouth", + "westeurope", + "westus" + ], + "metadata": { + "description": "The location of the Managed HSM. By default, this is 'southcentralus'." + } + }, + "enableHsm": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "Whether to enable deployment of Managed HSM. The default is false." + } + }, + "keyVaultSku": { + "type": "string", + "defaultValue": "premium", + "metadata": { + "description": "Key Vault SKU to deploy. The default is 'premium'" + } + }, + "attestationImage": { + "type": "string", + "defaultValue": "keyvault-mock-attestation:latest", + "metadata": { + "description": "The container image name and tag to use for the attestation mock service." + } + } + }, + "variables": { + "attestationFarm": "[concat(parameters('baseName'), 'farm')]", + "attestationSite": "[concat(parameters('baseName'), 'site')]", + "attestationUri": "[concat('DOCKER|azsdkengsys.azurecr.io/', parameters('attestationImage'))]", + "kvApiVersion": "2019-09-01", + "kvName": "[parameters('baseName')]", + "hsmApiVersion": "2021-04-01-preview", + "hsmName": "[concat(parameters('baseName'), 'hsm')]", + "mgmtApiVersion": "2019-04-01", + "blobContainerName": "backup", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "encryption": { + "services": { + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "[variables('kvApiVersion')]", + "name": "[variables('kvName')]", + "location": "[parameters('location')]", + "properties": { + "sku": { + "family": "A", + "name": "[parameters('keyVaultSku')]" + }, + "tenantId": "[parameters('tenantId')]", + "accessPolicies": [ + { + "tenantId": "[parameters('tenantId')]", + "objectId": "[parameters('testApplicationOid')]", + "permissions": { + "keys": [ + "backup", + "create", + "decrypt", + "delete", + "encrypt", + "get", + "import", + "list", + "purge", + "recover", + "release", + "restore", + "rotate", + "sign", + "unwrapKey", + "update", + "verify", + "wrapKey" + ], + "secrets": [ + "backup", + "delete", + "get", + "list", + "purge", + "recover", + "restore", + "set" + ], + "certificates": [ + "backup", + "create", + "delete", + "deleteissuers", + "get", + "getissuers", + "import", + "list", + "listissuers", + "managecontacts", + "manageissuers", + "purge", + "recover", + "restore", + "setissuers", + "update" + ] + } + } + ], + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableSoftDelete": true, + "softDeleteRetentionInDays": 7 + } + }, + { + "type": "Microsoft.KeyVault/managedHSMs", + "apiVersion": "[variables('hsmApiVersion')]", + "name": "[variables('hsmName')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('hsmLocation')]", + "sku": { + "family": "B", + "name": "Standard_B1" + }, + "properties": { + "tenantId": "[parameters('tenantId')]", + "initialAdminObjectIds": "[union(array(parameters('testApplicationOid')), array(parameters('provisionerApplicationOid')))]", + "enablePurgeProtection": false, + 
"enableSoftDelete": true, + "softDeleteRetentionInDays": 7, + "publicNetworkAccess": "Enabled", + "networkAcls": "[variables('networkAcls')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[parameters('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "properties": { + "cors": { + "corsRules": [] + }, + "deleteRetentionPolicy": { + "enabled": false + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": "2019-06-01", + "name": "[concat(variables('primaryAccountName'), '/default/', variables('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', variables('primaryAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName'))]" + ], + "properties": { + "publicAccess": "None" + } + }, + { + + "type": "Microsoft.Web/serverfarms", + "apiVersion": "2020-12-01", + "name": "[variables('attestationFarm')]", + "condition": "[parameters('enableHsm')]", + "location": "[parameters('location')]", + "kind": "linux", + "sku": { + "name": "B1" + }, + "properties": { + "reserved": true + } + }, + { + + "type": "Microsoft.Web/sites", + "apiVersion": "2020-12-01", + "name": "[variables('attestationSite')]", + "condition": "[parameters('enableHsm')]", + "dependsOn": [ + "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]" + ], + "location": "[parameters('location')]", + "properties": { + "httpsOnly": true, + "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', variables('attestationFarm'))]", + "siteConfig": { + "name": "[variables('attestationSite')]", + "alwaysOn": true, + "linuxFxVersion": "[variables('attestationUri')]", + "appSettings": [ + { + "name": "WEBSITES_ENABLE_APP_SERVICE_STORAGE", + "value": "false" + } + ] + } + } + } + ], + "outputs": { + "AZURE_KEYVAULT_URL": { + "type": "string", + "value": "[reference(variables('kvName')).vaultUri]" + }, + "AZURE_MANAGEDHSM_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[reference(variables('hsmName')).hsmUri]" + }, + "KEYVAULT_SKU": { + "type": "string", + "value": "[reference(parameters('baseName')).sku.name]" + }, + "CLIENT_OBJECTID": { + "type": "string", + "value": "[parameters('testApplicationOid')]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "BLOB_PRIMARY_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(variables('primaryAccountName'), variables('mgmtApiVersion')).keys[0].value]" + }, + "BLOB_CONTAINER_NAME" : { + "type": "string", + "value": "[variables('blobContainerName')]" + }, + "AZURE_KEYVAULT_ATTESTATION_URL": { + "type": "string", + "condition": "[parameters('enableHsm')]", + "value": "[format('https://{0}/', reference(variables('attestationSite')).defaultHostName)]" + } 
+ } +} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/time_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/time_unix.go new file mode 100644 index 00000000000..ce0ec3e9a41 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/time_unix.go @@ -0,0 +1,61 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azsecrets + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "strings" + "time" +) + +type timeUnix time.Time + +func (t timeUnix) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(t).Unix()) +} + +func (t *timeUnix) UnmarshalJSON(data []byte) error { + var seconds int64 + if err := json.Unmarshal(data, &seconds); err != nil { + return err + } + *t = timeUnix(time.Unix(seconds, 0)) + return nil +} + +func (t timeUnix) String() string { + return fmt.Sprintf("%d", time.Time(t).Unix()) +} + +func populateTimeUnix(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeUnix)(t) +} + +func unpopulateTimeUnix(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeUnix + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/version.go new file mode 100644 index 00000000000..2d3ed87cfa2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azsecrets + +const ( + moduleName = "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets" + version = "v1.1.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md new file mode 100644 index 00000000000..8b2eaba74ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/CHANGELOG.md @@ -0,0 +1,70 @@ +# Release History + +## 1.0.0 (2023-08-15) + +### Features Added +* This is the first stable release of the `internal` library for KeyVault. 
+ +### Other Changes +* Upgrade dependencies + +## 0.8.0 (2023-03-08) + +### Breaking Changes +* Moved to new location + +### Other Changes +* Upgrade dependencies + +## 0.7.1 (2022-11-14) + +### Bugs Fixed +* `KeyVaultChallengePolicy` uses incorrect authentication scope when challenge verification is disabled + +## 0.7.0 (2022-09-20) + +### Breaking Changes +* Added `*KeyVaultChallengePolicyOptions` parameter to `NewKeyVaultChallengePolicy` + +## 0.6.0 (2022-09-12) + +### Breaking Changes +* Verify the challenge resource matches the vault domain. See https://aka.ms/azsdk/blog/vault-uri for more information. +* `ParseID()` no longer appends a trailing slash to vault URLs + +## 0.5.0 (2022-05-12) + +### Breaking Changes +* Removed `ExpiringResource` and its dependencies in favor of shared implementation from `internal/temporal`. + +### Other Changes +* Updated to latest versions of `azcore` and `internal`. + +## 0.4.0 (2022-04-22) + +### Breaking Changes +* Updated `ExpiringResource` and its dependent types to use generics. + +### Other Changes +* Remove reference to `TokenRequestOptions.TenantID` as it's been removed and wasn't working anyways. + +## 0.3.0 (2022-04-04) + +### Features Added +* Adds the `ParseKeyvaultID` function to parse an ID into the Key Vault URL, item name, and item version + +### Breaking Changes +* Updates to azcore v0.23.0 + +## 0.2.1 (2022-01-31) + +### Bugs Fixed +* Avoid retries on terminal failures (#16932) + +## 0.2.0 (2022-01-12) + +### Bugs Fixed +* Fixes a bug with Managed HSMs that prevented correctly authorizing requests. + +## 0.1.0 (2021-11-09) +* This is the initial release of the `internal` library for KeyVault diff --git a/vendor/github.com/Azure/azure-storage-blob-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt similarity index 100% rename from vendor/github.com/Azure/azure-storage-blob-go/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/LICENSE.txt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md new file mode 100644 index 00000000000..8516337cf61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/README.md @@ -0,0 +1,21 @@ +# Key Vault Internal Module for Go + +This module contains shared code for all the Key Vault SDKs, mainly the challenge authentication policy. + +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
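+
+As a quick orientation, the sketch below shows how a Key Vault SDK client could
+wire this module's challenge policy into an `azcore` pipeline. It is
+illustrative only: this package is internal to the Azure SDK repository, and the
+module name and version passed to `runtime.NewPipeline` are placeholders.
+
+```go
+package example
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	kvinternal "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal"
+)
+
+func newPipeline() (runtime.Pipeline, error) {
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		return runtime.Pipeline{}, err
+	}
+	// The policy answers Key Vault's 401 challenge by requesting a token for the
+	// scope the service advertises in its WWW-Authenticate header, then replays
+	// the original request with that token attached.
+	challengePolicy := kvinternal.NewKeyVaultChallengePolicy(cred, nil)
+	return runtime.NewPipeline("placeholder-module", "v0.0.1",
+		runtime.PipelineOptions{PerRetry: []policy.Policy{challengePolicy}}, nil), nil
+}
+```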
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go new file mode 100644 index 00000000000..f5c8b725f2f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/challenge_policy.go @@ -0,0 +1,175 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +const challengeMatchError = `challenge resource "%s" doesn't match the requested domain. Set DisableChallengeResourceVerification to true in your client options to disable. See https://aka.ms/azsdk/blog/vault-uri for more information` + +type KeyVaultChallengePolicyOptions struct { + // DisableChallengeResourceVerification controls whether the policy requires the + // authentication challenge resource to match the Key Vault or Managed HSM domain + DisableChallengeResourceVerification bool +} + +type keyVaultAuthorizer struct { + // tro is the policy's authentication parameters. These are discovered from an authentication challenge + // elicited ahead of the first client request. + tro policy.TokenRequestOptions + // TODO: move into tro once it has a tenant field (https://github.com/Azure/azure-sdk-for-go/issues/19841) + tenantID string + verifyChallengeResource bool +} + +type reqBody struct { + body io.ReadSeekCloser + contentType string +} + +func NewKeyVaultChallengePolicy(cred azcore.TokenCredential, opts *KeyVaultChallengePolicyOptions) policy.Policy { + if opts == nil { + opts = &KeyVaultChallengePolicyOptions{} + } + kv := keyVaultAuthorizer{ + verifyChallengeResource: !opts.DisableChallengeResourceVerification, + } + return runtime.NewBearerTokenPolicy(cred, nil, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: kv.authorize, + OnChallenge: kv.authorizeOnChallenge, + }, + }) +} + +func (k *keyVaultAuthorizer) authorize(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + if len(k.tro.Scopes) == 0 || k.tenantID == "" { + if body := req.Body(); body != nil { + // We don't know the scope or tenant ID because we haven't seen a challenge yet. We elicit one now by sending + // the request without authorization, first removing its body, if any. authorizeOnChallenge will reattach the + // body, authorize the request, and send it again. + rb := reqBody{body, req.Raw().Header.Get("content-type")} + req.SetOperationValue(rb) + if err := req.SetBody(nil, ""); err != nil { + return err + } + } + // returning nil indicates the bearer token policy should send the request + return nil + } + // else we know the auth parameters and can authorize the request as normal + return authNZ(k.tro) +} + +func (k *keyVaultAuthorizer) authorizeOnChallenge(req *policy.Request, res *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + if err := k.updateTokenRequestOptions(res, req.Raw()); err != nil { + return err + } + // reattach the request's original body, if it was removed by authorize(). 
If a bug prevents recovering
+	// the body, this policy will send the request without it and get a 400 response from Key Vault.
+	var rb reqBody
+	if req.OperationValue(&rb) {
+		if err := req.SetBody(rb.body, rb.contentType); err != nil {
+			return err
+		}
+	}
+	// authenticate with the parameters supplied by Key Vault, authorize the request, send it again
+	return authNZ(k.tro)
+}
+
+// parses Tenant ID from auth challenge
+// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000
+func parseTenant(url string) string {
+	if url == "" {
+		return ""
+	}
+	parts := strings.Split(url, "/")
+	tenant := parts[3]
+	tenant = strings.ReplaceAll(tenant, ",", "")
+	return tenant
+}
+
+type challengePolicyError struct {
+	err error
+}
+
+func (c *challengePolicyError) Error() string {
+	return c.err.Error()
+}
+
+func (*challengePolicyError) NonRetriable() {
+	// marker method
+}
+
+func (c *challengePolicyError) Unwrap() error {
+	return c.err
+}
+
+var _ errorinfo.NonRetriable = (*challengePolicyError)(nil)
+
+// updateTokenRequestOptions parses authentication parameters from Key Vault's challenge
+func (k *keyVaultAuthorizer) updateTokenRequestOptions(resp *http.Response, req *http.Request) error {
+	authHeader := resp.Header.Get("WWW-Authenticate")
+	if authHeader == "" {
+		return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")}
+	}
+
+	// Strip down to auth and resource
+	// Format is "Bearer authorization=\"<authorization uri>\" resource=\"<resource>\"" OR
+	// "Bearer authorization=\"<authorization uri>\" scope=\"<scope>\" resource=\"<resource>\""
+	authHeader = strings.ReplaceAll(authHeader, "Bearer ", "")
+
+	parts := strings.Split(authHeader, " ")
+
+	vals := map[string]string{}
+	for _, part := range parts {
+		subParts := strings.Split(part, "=")
+		if len(subParts) == 2 {
+			stripped := strings.ReplaceAll(subParts[1], "\"", "")
+			stripped = strings.TrimSuffix(stripped, ",")
+			vals[subParts[0]] = stripped
+		}
+	}
+
+	k.tenantID = parseTenant(vals["authorization"])
+	scope := ""
+	if v, ok := vals["scope"]; ok {
+		scope = v
+	} else if v, ok := vals["resource"]; ok {
+		scope = v
+	}
+	if scope == "" {
+		return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")}
+	}
+	if k.verifyChallengeResource {
+		// the challenge resource's host must match the requested vault's host
+		parsed, err := url.Parse(scope)
+		if err != nil {
+			return &challengePolicyError{err: fmt.Errorf(`invalid challenge resource "%s": %v`, scope, err)}
+		}
+		if !strings.HasSuffix(req.URL.Host, "."+parsed.Host) {
+			return &challengePolicyError{err: fmt.Errorf(challengeMatchError, scope)}
+		}
+	}
+	if !strings.HasSuffix(scope, "/.default") {
+		scope += "/.default"
+	}
+	k.tro.Scopes = []string{scope}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml
new file mode 100644
index 00000000000..2f8b8e1a87a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/ci.securitykeyvault.yml
@@ -0,0 +1,28 @@
+# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
+trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/security/keyvault/internal + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'security/keyvault/internal' + RunLiveTests: false diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go similarity index 62% rename from vendor/github.com/Azure/azure-sdk-for-go/version/version.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go index bcfbb15cce0..f080c819f55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/constants.go @@ -1,7 +1,11 @@ -package version +//go:build go1.18 +// +build go1.18 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Number contains the semantic version of this SDK. -const Number = "v68.0.0" +package internal + +const ( + version = "v1.0.0" //nolint +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go new file mode 100644 index 00000000000..d8f93492f51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package internal diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go new file mode 100644 index 00000000000..8511832d27c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal/parse.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+package internal
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+)
+
+// ParseID parses "https://myvaultname.vault.azure.net/keys/key1053998307/b86c2e6ad9054f4abf69cc185b99aa60"
+// into "https://myvaultname.vault.azure.net", "key1053998307", and "b86c2e6ad9054f4abf69cc185b99aa60"
+func ParseID(id *string) (*string, *string, *string) {
+	if id == nil {
+		return nil, nil, nil
+	}
+	parsed, err := url.Parse(*id)
+	if err != nil {
+		return nil, nil, nil
+	}
+
+	url := fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host)
+	split := strings.Split(strings.TrimPrefix(parsed.Path, "/"), "/")
+	if len(split) < 3 {
+		if len(split) == 2 {
+			return &url, to.Ptr(split[1]), nil
+		}
+		return &url, nil, nil
+	}
+
+	return &url, to.Ptr(split[1]), to.Ptr(split[2])
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
new file mode 100644
index 00000000000..f6c99441ba9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
@@ -0,0 +1,285 @@
+# Release History
+
+## 1.3.2 (2024-04-09)
+
+### Bugs Fixed
+* Fixed an issue where GetSASURL() was providing HTTPS SAS, instead of the default http+https SAS. Fixes [#22448](https://github.com/Azure/azure-sdk-for-go/issues/22448)
+
+### Other Changes
+* Integrate `InsecureAllowCredentialWithHTTP` client options.
+* Update dependencies.
+
+## 1.3.1 (2024-02-28)
+
+### Bugs Fixed
+
+* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints.
+* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426).
+
+## 1.3.0 (2024-02-12)
+
+### Bugs Fixed
+* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156).
+* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [#22356](https://github.com/Azure/azure-sdk-for-go/issues/22356).
+* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297).
+
+### Other Changes
+* Updated the version of `azcore` to `1.9.2`
+
+## 1.3.0-beta.1 (2024-01-09)
+
+### Features Added
+
+* Updated service version to `2023-11-03`.
+* Added support for Audience when OAuth is used.
+
+### Bugs Fixed
+
+* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841).
+
+## 1.2.1 (2023-12-13)
+
+### Features Added
+
+* Exposed GetSASURL from specialized clients
+
+### Bugs Fixed
+
+* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649).
+* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method
+* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs.
+* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869).
+* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995).
+* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837).
+ +### Other Changes + +* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. + +## 1.2.0 (2023-10-11) + +### Bugs Fixed +* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`. + +## 1.2.0-beta.1 (2023-09-18) + +### Features Added +* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06 , 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03 +* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal). +* Added `CopySourceTag` option for `UploadBlobFromURLOptions` +* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client. +* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e, $web). +* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS +* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum. +* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB. + +### Bugs Fixed +* Fixed issue where some requests fail with mismatch in string to sign. +* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). + +### Other Changes +* Updating version of azcore to 1.6.0. + +## 1.1.0 (2023-07-13) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. +* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). + +### Other Changes + +* Add `dragonfly` to the list of build constraints for `blockblob`. +* Updating version of azcore to 1.6.0 and azidentity to 1.3.0 + +## 1.1.0-beta.1 (2023-05-09) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. 
+ +## 1.0.0 (2023-02-07) + +### Features Added + +* Add support to log calculated block size and count during uploads +* Added MissingSharedKeyCredential error type for cleaner UX. Related to [#19864](https://github.com/Azure/azure-sdk-for-go/issues/19864). + +### Breaking Changes + +* Changed API signatures to map correctly to Azure Storage REST APIs, These changes impact: + * `blob.GetSASURL()` + * `blockblob.StageBlockFromURL()` + * `container.SetAccessPolicy()` + * `container.GetSASURL()` + * `service.GetSASURL()` + * `service.FilterBlobs()` + * `lease.AcquireLease()` (blobs and containers) + * `lease.ChangeLease()` (blobs and containers) +* Type name changes: + * `CpkInfo` -> `CPKInfo` + * `CpkScopeInfo` -> `CPKScopeInfo` + * `RuleId` -> `RuleID` + * `PolicyId` -> `PolicyID` + * `CorsRule` -> `CORSRule` +* Remove `AccountServices` it is now hardcoded to blobs + +### Bugs Fixed + +* Fixed encoding issues seen in FilterBlobs. Fixes [#17421](https://github.com/Azure/azure-sdk-for-go/issues/17421). +* Fixing inconsistency seen with Metadata and ORS response. Fixes [#19688](https://github.com/Azure/azure-sdk-for-go/issues/19688). +* Fixed endless loop during pagination issue [#19773](https://github.com/Azure/azure-sdk-for-go/pull/19773). + +### Other Changes + +* Exported some missing types in the `blob`, `container` and `service` packages. Fixes [#19775](https://github.com/Azure/azure-sdk-for-go/issues/19775). +* SAS changes [#19781](https://github.com/Azure/azure-sdk-for-go/pull/19781): + * AccountSASPermissions: SetImmutabilityPolicy support + * ContainerSASPermissions: Move support + * Validations to ensure correct sas perm ordering + +## 0.6.1 (2022-12-09) + +### Bugs Fixed + +* Fix compilation error on Darwin. + +## 0.6.0 (2022-12-08) + +### Features Added + +* Added BlobDeleteType to DeleteOptions to allow access to ['Permanent'](https://learn.microsoft.com/rest/api/storageservices/delete-blob#permanent-delete) DeleteType. +* Added [Set Blob Expiry API](https://learn.microsoft.com/rest/api/storageservices/set-blob-expiry). +* Added method `ServiceClient()` to the `azblob.Client` type, allowing access to the underlying service client. +* Added support for object level immutability policy with versioning (Version Level WORM). +* Added the custom CRC64 polynomial used by storage for transactional hashes, and implemented automatic hashing for transactions. + +### Breaking Changes + +* Corrected the name for `saoid` and `suoid` SAS parameters in `BlobSignatureValues` struct as per [this](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas#construct-a-user-delegation-sas) +* Updated type of `BlockSize` from int to int64 in `UploadStreamOptions` +* CRC64 transactional hashes are now supplied with a `uint64` rather than a `[]byte` to conform with Golang's `hash/crc64` package +* Field `XMSContentCRC64` has been renamed to `ContentCRC64` +* The `Lease*` constant types and values in the `blob` and `container` packages have been moved to the `lease` package and their names fixed up to avoid stuttering. +* Fields `TransactionalContentCRC64` and `TransactionalContentMD5` have been replaced by `TransactionalValidation`. +* Fields `SourceContentCRC64` and `SourceContentMD5` have been replaced by `SourceContentValidation`. +* Field `TransactionalContentMD5` has been removed from type `AppendBlockFromURLOptions`. + +### Bugs Fixed + +* Corrected signing of User Delegation SAS. 
Fixes [#19372](https://github.com/Azure/azure-sdk-for-go/issues/19372) and [#19454](https://github.com/Azure/azure-sdk-for-go/issues/19454) +* Added formatting of start and expiry time in [SetAccessPolicy](https://learn.microsoft.com/rest/api/storageservices/set-container-acl#request-body). Fixes [#18712](https://github.com/Azure/azure-sdk-for-go/issues/18712) +* Uploading block blobs larger than 256MB can fail in some cases with error `net/http: HTTP/1.x transport connection broken`. +* Blob name parameters are URL-encoded before constructing the complete blob URL. + +### Other Changes + +* Added some missing public surface area in the `container` and `service` packages. +* The `UploadStream()` methods now use anonymous memory mapped files for buffers in order to reduce heap allocations/fragmentation. + * The anonymous memory mapped files are typically backed by the page/swap file, multiple files are not actually created. + +## 0.5.1 (2022-10-11) + +### Bugs Fixed + +* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string +* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'. + +### Other Changes + +* Improved docs for client constructors. +* Updating azcore version to 1.1.4 + +## 0.5.0 (2022-09-29) + +### Breaking Changes + +* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme) + +### Features Added + +* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977) +* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container). + +### Bugs Fixed + +* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767) +* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937) + +## 0.4.1 (2022-05-12) + +### Other Changes + +* Updated to latest `azcore` and `internal` modules + +## 0.4.0 (2022-04-19) + +### Breaking Changes + +* Fixed Issue #17150 : Renaming/refactoring high level methods. +* Fixed Issue #16972 : Constructors should return clients by reference. +* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags + remains the same. + +### Bugs Fixed + +* Fixed Issue #17515 : SetTags options bag missing leaseID. +* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. +* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. +* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods + ignoring the options bag. +* Fixed Issue #16920 : Fixing error handling example. +* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. +* Fixed Issue #16679 : Response parsing issue in List blobs API. + +## 0.3.0 (2022-02-09) + +### Breaking Changes + +* Updated to latest `azcore`. Public surface area is unchanged. 
+* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is + now `*RetryReaderOptions`. + +### Bugs Fixed + +* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource. +* Fixed Issue #16223 : `HttpRange` does not expose its fields. +* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient` +* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()` +* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment +* Fixed Issue #16679 : Unable to access Metadata when listing blobs +* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission. +* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError` + +## 0.2.0 (2021-11-03) + +### Breaking Changes + +* Clients now have one constructor per authentication method + +## 0.1.0 (2021-09-13) + +### Features Added + +* This is the initial preview release of the `azblob` library diff --git a/vendor/github.com/Azure/azure-storage-queue-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt similarity index 100% rename from vendor/github.com/Azure/azure-storage-queue-go/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md new file mode 100644 index 00000000000..1f51959fa3d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -0,0 +1,282 @@ +# Azure Blob Storage module for Go + +> Service Version: 2023-11-03 + +Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob +Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or +definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction). + +Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to: + +* Authenticate clients with Azure Blob Storage +* Manipulate containers and blobs in an Azure storage account + +Key links: + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples] + +## Getting started + +### Prerequisites + +- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) +- Azure subscription - [Create a free account](https://azure.microsoft.com/free/) +- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: + +```bash +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Install the package + +Install the Azure Blob Storage client module for Go with [go get][goget]: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module. 
+ +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +### Authenticate the client + +To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. + +```go +// create a credential for authenticating with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle err + +// create an azblob.Client for the specified storage account that uses the above credential +client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil) +// TODO: handle err +``` + +Learn more about enabling Azure Active Directory for authentication with Azure Storage: + +* [Authorize access to blobs using Azure Active Directory][storage_ad] + +Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. For examples, see: + +* [Blob samples][samples] + +## Key concepts + +Blob Storage is designed for: + +- Serving images or documents directly to a browser. +- Storing files for distributed access. +- Streaming video and audio. +- Writing to log files. +- Storing data for backup and restore, disaster recovery, and archiving. +- Storing data for analysis by an on-premises or Azure-hosted service. + +Blob Storage offers three types of resources: + +- The _storage account_ +- One or more _containers_ in a storage account +- One or more _blobs_ in a container + +Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. +The storage account is specified when the `azblob.Client` is constructed. + +### Specialized clients + +The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + +- [appendblob][append_blob] +- [blockblob][block_blob] +- [pageblob][page_blob] + +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. + +The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. + +The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. + +The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens. +See the package's documentation for more information. + +### Goroutine safety + +We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). 
This ensures that the recommendation to reuse client instances is always safe, even across goroutines. + +### Blob metadata + +Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. + +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + + +## Examples + +### Upload a blob + +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" + blobName = "sample-blob" + sampleFile = "path/to/sample/file" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// upload the file to the specified container with the specified blob name +_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) +// TODO: handle error +``` + +### Download a blob + +```go +// this example accesses a public blob via anonymous access, so no credentials are required +client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil) +// TODO: handle error + +// create or open a local file where we can download the blob +file, err := os.Create("cloud.jpg") +// TODO: handle error +defer file.Close() + +// download the blob +_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) +// TODO: handle error +``` + +### Enumerate blobs + +```go +const ( + account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/" + containerName = "sample-container" +) + +// authenticate with Azure Active Directory +cred, err := azidentity.NewDefaultAzureCredential(nil) +// TODO: handle error + +// create a client for the specified storage account +client, err := azblob.NewClient(account, cred, nil) +// TODO: handle error + +// blob listings are returned across multiple pages +pager := client.NewListBlobsFlatPager(containerName, nil) + +// continue fetching pages until no more remain +for pager.More() { + // advance to the next page + page, err := pager.NextPage(context.TODO()) + // TODO: handle error + + // print the blob names for this page + for _, blob := range page.Segment.BlobItems { + fmt.Println(*blob.Name) + } +} +``` + +## Troubleshooting + +All Blob service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [bloberror][blob_error] package provides the possible Storage error codes +along with helper facilities for error handling. 
+
+```go
+const (
+	connectionString = "<connection_string>"
+	containerName    = "sample-container"
+)
+
+// create a client with the provided connection string
+client, err := azblob.NewClientFromConnectionString(connectionString, nil)
+// TODO: handle error
+
+// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion
+_, err = client.DeleteContainer(context.TODO(), containerName, nil)
+
+if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) {
+	// ignore any errors if the container is being deleted or already has been deleted
+} else if err != nil {
+	// TODO: some other error
+}
+```
+
+## Next steps
+
+Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more.
+
+## Contributing
+
+See the [Storage CONTRIBUTING.md][storage_contrib] for details on building,
+testing, and contributing to this library.
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution. For
+details, visit [cla.microsoft.com][cla].
+
+This project has adopted the [Microsoft Open Source Code of Conduct][coc].
+For more information see the [Code of Conduct FAQ][coc_faq]
+or contact [opencode@microsoft.com][coc_contact] with any
+additional questions or comments.
+
+![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png)
+
+
+[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob
+[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation
+[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
+[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview
+[godevdl]: https://go.dev/dl/
+[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them
+[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main
+[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview
+[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
+[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
+[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
+[azure_cli]: https://learn.microsoft.com/cli/azure
+[azure_sub]: https://azure.microsoft.com/free/
+[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
+[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad
+[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError
+[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go
+[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go
+[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go
+[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go
+[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go
+[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go
+[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease
+[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go
+[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas
+[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go
+[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md
+[cla]: https://cla.microsoft.com
+[coc]: https://opensource.microsoft.com/codeofconduct/
+[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/
+[coc_contact]: mailto:opencode@microsoft.com
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
new file mode 100644
index 00000000000..06b0fd419f0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
@@ -0,0 +1,366 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package appendblob
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+// ClientOptions contains the optional parameters when creating a Client.
+type ClientOptions base.ClientOptions
+
+// Client represents a client to an Azure Storage append blob.
+type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
+
+// NewClient creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://<storage-account-name>.blob.core.windows.net/container/blob.txt
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	audience := base.GetAudience((*base.ClientOptions)(options))
+	conOptions := shared.GetClientOptions(options)
+	authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil
+}
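+
+// A usage sketch, added for illustration and not part of the generated API
+// surface: the storage account, container, and blob names below are
+// placeholders, and error handling is elided.
+//
+//	cred, _ := azidentity.NewDefaultAzureCredential(nil)
+//	client, _ := appendblob.NewClient(
+//		"https://<storage-account-name>.blob.core.windows.net/logs/app.log", cred, nil)
+//	_, _ = client.Create(context.TODO(), nil)
+//	body := streaming.NopCloser(strings.NewReader("a line of log output\n"))
+//	_, _ = client.AppendBlock(context.TODO(), body, nil)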
+//   - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
+//   - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+//   - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+//   - connectionString - a connection string for the desired storage account
+//   - containerName - the name of the container within the storage account
+//   - blobName - the name of the blob within the container
+//   - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+// BlobClient returns the embedded blob client for this AppendBlob client.
+func (ab *Client) BlobClient() *blob.Client {
+	innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
+	return (*blob.Client)(innerBlob)
+}
+
+func (ab *Client) sharedKey() *blob.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
+}
+
+func (ab *Client) generated() *generated.AppendBlobClient {
+	_, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
+	return appendBlob
+}
+
+func (ab *Client) innerBlobGenerated() *generated.BlobClient {
+	b := ab.BlobClient()
+	return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (ab *Client) URL() string {
+	return ab.generated().Endpoint()
+}
+
+// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
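+//
+// A minimal usage sketch (abClient and the snapshot timestamp are hypothetical
+// values; error handling elided):
+//
+//	snapClient, _ := abClient.WithSnapshot("2024-01-01T01:01:01.0000000Z")
+//	_ = snapClient.URL() // the returned client's URL carries the snapshot query parameter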
+func (ab *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := blob.ParseURL(ab.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil
+}
+
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (ab *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := blob.ParseURL(ab.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil
+}
+
+// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) {
+	opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
+	resp, err := ab.generated().Create(ctx, 0, opts, httpHeaders, leaseAccessConditions, cpkInfo,
+		cpkScopeInfo, modifiedAccessConditions)
+	return resp, err
+}
+
+// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
+func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return AppendBlockResponse{}, err
+	}
+
+	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format()
+
+	if o != nil && o.TransactionalValidation != nil {
+		body, err = o.TransactionalValidation.Apply(body, appendOptions)
+		if err != nil {
+			return AppendBlockResponse{}, err
+		}
+	}
+
+	resp, err := ab.generated().AppendBlock(ctx,
+		count,
+		body,
+		appendOptions,
+		leaseAccessConditions,
+		appendPositionAccessConditions,
+		cpkInfo,
+		cpkScope,
+		modifiedAccessConditions)
+
+	return resp, err
+}
+
+// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
+func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) {
+	appendBlockFromURLOptions,
+		cpkInfo,
+		cpkScopeInfo,
+		leaseAccessConditions,
+		appendPositionAccessConditions,
+		modifiedAccessConditions,
+		sourceModifiedAccessConditions := o.format()
+
+	// the content length must always be 0 when appending from a URL; the service rejects anything else with a 400.
+	resp, err := ab.generated().AppendBlockFromURL(ctx,
+		source,
+		0,
+		appendBlockFromURLOptions,
+		cpkInfo,
+		cpkScopeInfo,
+		leaseAccessConditions,
+		appendPositionAccessConditions,
+		modifiedAccessConditions,
+		sourceModifiedAccessConditions)
+	return resp, err
+}
+
+// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only.
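+//
+// A minimal usage sketch (abClient is a hypothetical *Client; error handling elided):
+//
+//	_, err := abClient.Seal(context.TODO(), nil)
+//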
+// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal +func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) { + leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := o.format() + resp, err := ab.generated().Seal(ctx, + nil, + leaseAccessConditions, + modifiedAccessConditions, + positionAccessConditions) + return resp, err +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return ab.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return ab.BlobClient().Undelete(ctx, o) +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return ab.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return ab.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return ab.BlobClient().SetLegalHold(ctx, legalHold, options) +} + +// SetTier +// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account. +func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. SetTier only works for page blob in premium storage account and block blob in blob storage account") +} + +// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry +func (ab *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { + if expiryType == nil { + expiryType = ExpiryTypeNever{} + } + et, opts := expiryType.Format(o) + resp, err := ab.innerBlobGenerated().SetExpiry(ctx, et, opts) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
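+//
+// A minimal usage sketch (abClient is a hypothetical *Client; ContentLength is just
+// one of the many fields on the response):
+//
+//	props, err := abClient.GetProperties(context.TODO(), nil)
+//	if err == nil && props.ContentLength != nil {
+//		fmt.Printf("blob is %d bytes\n", *props.ContentLength)
+//	}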
+func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return ab.BlobClient().GetProperties(ctx, o) +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return ab.BlobClient().GetAccountInfo(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return ab.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return ab.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return ab.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return ab.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return ab.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL +// Deprecated: CopyFromURL works only with block blob +func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. 
CopyFromURL works only with block blob")
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return ab.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
+// Concurrent Download Functions -----------------------------------------------------------------------------------------
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
+	return ab.BlobClient().DownloadStream(ctx, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
+	return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
+func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
+	return ab.BlobClient().DownloadFile(ctx, file, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go
new file mode 100644
index 00000000000..0834743f0c6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go
@@ -0,0 +1,180 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package appendblob
+
+import (
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+)
+
+// Type Declarations ---------------------------------------------------------------------
+
+// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method.
+type AppendPositionAccessConditions = generated.AppendPositionAccessConditions
+
+// Request Model Declaration -------------------------------------------------------------------------------------------
+
+// CreateOptions provides a set of configurations for the Create Append Blob operation.
+type CreateOptions struct {
+	// Specifies the date time when the blob's immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	AccessConditions *blob.AccessConditions
+
+	HTTPHeaders *blob.HTTPHeaders
+
+	CPKInfo *blob.CPKInfo
+
+	CPKScopeInfo *blob.CPKScopeInfo
+
+	// Optional. Used to set blob tags in various blob operations.
+	Tags map[string]string
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob.
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]*string +} + +func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := generated.AppendBlobClientCreateOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method. +type AppendBlockOptions struct { + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + AccessConditions *blob.AccessConditions +} + +func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. +type AppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. + SourceContentValidation blob.SourceContentValidationType + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions + + // Range specifies a range of bytes. The default value is all bytes. 
+	Range blob.HTTPRange
+}
+
+func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CPKInfo,
+	*generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions,
+	*generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil, nil, nil
+	}
+
+	options := &generated.AppendBlobClientAppendBlockFromURLOptions{
+		SourceRange:             exported.FormatHTTPRange(o.Range),
+		CopySourceAuthorization: o.CopySourceAuthorization,
+	}
+
+	if o.SourceContentValidation != nil {
+		o.SourceContentValidation.Apply(options)
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// SealOptions provides a set of configurations for the Seal Append Blob operation.
+type SealOptions struct {
+	AccessConditions               *blob.AccessConditions
+	AppendPositionAccessConditions *AppendPositionAccessConditions
+}
+
+func (o *SealOptions) format() (*generated.LeaseAccessConditions,
+	*generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return leaseAccessConditions, modifiedAccessConditions, o.AppendPositionAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ExpiryType defines values for ExpiryType.
+type ExpiryType = exported.ExpiryType
+
+// ExpiryTypeAbsolute defines the absolute time for the blob expiry.
+type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute
+
+// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry.
+type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow
+
+// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry.
+type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation
+
+// ExpiryTypeNever defines that the blob will be set to never expire.
+type ExpiryTypeNever = exported.ExpiryTypeNever
+
+// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
+type SetExpiryOptions = exported.SetExpiryOptions
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go
new file mode 100644
index 00000000000..e6851237c91
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/responses.go
@@ -0,0 +1,26 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package appendblob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// CreateResponse contains the response from method Client.Create.
+type CreateResponse = generated.AppendBlobClientCreateResponse
+
+// AppendBlockResponse contains the response from method Client.AppendBlock.
+type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse + +// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL. +type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse + +// SealResponse contains the response from method Client.Seal. +type SealResponse = generated.AppendBlobClientSealResponse + +// SetExpiryResponse contains the response from method Client.SetExpiry. +type SetExpiryResponse = generated.BlobClientSetExpiryResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json new file mode 100644 index 00000000000..d971ff1ec84 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azblob", + "Tag": "go/storage/azblob_71b0a04c12" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go new file mode 100644 index 00000000000..7b55cd1431a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -0,0 +1,471 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + "os" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type Client base.Client[generated.BlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://<account>.blob.core.windows.net/container/blob.txt
+//   - cred - an Azure AD credential, typically obtained via the azidentity module
+//   - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	audience := base.GetAudience((*base.ClientOptions)(options))
+	conOptions := shared.GetClientOptions(options)
+	authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a blob or with a shared access signature (SAS) token.
+//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
+//   - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
+//   - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+//   - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+//   - connectionString - a connection string for the desired storage account
+//   - containerName - the name of the container within the storage account
+//   - blobName - the name of the blob within the container
+//   - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (b *Client) generated() *generated.BlobClient {
+	return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
+func (b *Client) sharedKey() *SharedKeyCredential {
+	return base.SharedKey((*base.Client[generated.BlobClient])(b))
+}
+
+func (b *Client) credential() any {
+	return base.Credential((*base.Client[generated.BlobClient])(b))
+}
+
+func (b *Client) getClientOptions() *base.ClientOptions {
+	return base.GetClientOptions((*base.Client[generated.BlobClient])(b))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (b *Client) URL() string {
+	return b.generated().Endpoint()
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := ParseURL(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (b *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := ParseURL(b.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil
+}
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) {
+	deleteOptions, leaseInfo, accessConditions := o.format()
+	resp, err := b.generated().Delete(ctx, deleteOptions, leaseInfo, accessConditions)
+	return resp, err
+}
+
+// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
+func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) {
+	undeleteOptions := o.format()
+	resp, err := b.generated().Undelete(ctx, undeleteOptions)
+	return resp, err
+}
+
+// SetTier operation sets the tier on a blob.
The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetTier(ctx, tier, opts, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions := options.format() + resp, err := b.generated().GetProperties(ctx, opts, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + return resp, err +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + return resp, err +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) { + basics := generated.BlobClientSetMetadataOptions{Metadata: metadata} + leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions := o.format() + resp, err := b.generated().SetMetadata(ctx, &basics, leaseAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + return resp, err +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions) + + return resp, err +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
+func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) { + opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().StartCopyFromURL(ctx, copySource, opts, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := b.generated().AbortCopyFromURL(ctx, copyID, opts, leaseAccessConditions) + return resp, err +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) { + serializedTags := shared.SerializeBlobTags(tags) + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().SetTags(ctx, *serializedTags, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.generated().GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + return resp, err + +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked". +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) { + blobSetImmutabilityPolicyOptions, modifiedAccessConditions := options.format() + blobSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry = &expiryTime + resp, err := b.generated().SetImmutabilityPolicy(ctx, blobSetImmutabilityPolicyOptions, modifiedAccessConditions) + return resp, err +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) { + deleteImmutabilityOptions := options.format() + resp, err := b.generated().DeleteImmutabilityPolicy(ctx, deleteImmutabilityOptions) + return resp, err +} + +// SetLegalHold operation enables users to set legal hold on a blob. 
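+//
+// A minimal usage sketch (blobClient is a hypothetical *Client; error handling elided):
+//
+//	_, err := blobClient.SetLegalHold(context.TODO(), true, nil)
+//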
+// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) { + setLegalHoldOptions := options.format() + resp, err := b.generated().SetLegalHold(ctx, legalHold, setLegalHoldOptions) + return resp, err +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if b.sharedKey() == nil { + return "", bloberror.MissingSharedKeyCredential + } + + urlParts, err := ParseURL(b.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + st := o.format() + + qps, err := sas.BlobSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: sas.Version, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(b.sharedKey()) + + if err != nil { + return "", err + } + + endpoint := b.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. +func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + count := o.Range.Count + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + // Prepare and do parallel download. 
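+	// The transfer is split into ceil(count/BlockSize) ranged reads: NumChunks below
+	// is the ceiling division ((count-1)/BlockSize)+1. Chunks are fetched by up to
+	// o.Concurrency workers, and each chunk is written at its own offset through a
+	// section writer, so chunks may complete in any order.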
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
+		OperationName: "downloadBlobToWriterAt",
+		TransferSize:  count,
+		ChunkSize:     o.BlockSize,
+		NumChunks:     uint64(((count - 1) / o.BlockSize) + 1),
+		Concurrency:   o.Concurrency,
+		Operation: func(ctx context.Context, chunkStart int64, count int64) error {
+			downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
+				Offset: chunkStart + o.Range.Offset,
+				Count:  count,
+			}, nil)
+			dr, err := b.DownloadStream(ctx, downloadBlobOptions)
+			if err != nil {
+				return err
+			}
+			var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
+			if o.Progress != nil {
+				rangeProgress := int64(0)
+				body = streaming.NewResponseProgress(
+					body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - rangeProgress
+						rangeProgress = bytesTransferred
+						progressLock.Lock()
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			_, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body)
+			if err != nil {
+				return err
+			}
+			err = body.Close()
+			return err
+		},
+	})
+	if err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
+	if o == nil {
+		o = &DownloadStreamOptions{}
+	}
+	downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format()
+
+	dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+	if err != nil {
+		return DownloadStreamResponse{}, err
+	}
+
+	return DownloadStreamResponse{
+		client:                 b,
+		DownloadResponse:       dr,
+		getInfo:                httpGetterInfo{Range: o.Range, ETag: dr.ETag},
+		ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
+		cpkInfo:                o.CPKInfo,
+		cpkScope:               o.CPKScopeInfo,
+	}, err
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) {
+	if o == nil {
+		o = &DownloadBufferOptions{}
+	}
+	return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o))
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
+func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) {
+	if o == nil {
+		o = &DownloadFileOptions{}
+	}
+	do := (*downloadOptions)(o)
+
+	// 1. Calculate the size of the destination file
+	var size int64
+
+	count := do.Range.Count
+	if count == CountToEnd {
+		// Try to get Azure blob's size
+		getBlobPropertiesOptions := do.getBlobPropertiesOptions()
+		props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
+		if err != nil {
+			return 0, err
+		}
+		size = *props.ContentLength - do.Range.Offset
+	} else {
+		size = count
+	}
+
+	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
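+	// file.Truncate both grows and shrinks the file; it only adjusts the size so
+	// that the parallel download below can write every chunk at its final offset.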
+	stat, err := file.Stat()
+	if err != nil {
+		return 0, err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return 0, err
+		}
+	}
+
+	if size > 0 {
+		return b.downloadBuffer(ctx, file, *do)
+	} else { // if the blob's size is 0, there is no need to download it
+		return 0, nil
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go
new file mode 100644
index 00000000000..daef800ed0b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go
@@ -0,0 +1,235 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package blob
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+)
+
+const (
+	CountToEnd = 0
+
+	SnapshotTimeFormat = exported.SnapshotTimeFormat
+
+	// DefaultDownloadBlockSize is the default block size
+	DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+
+	// DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel
+	DefaultConcurrency = shared.DefaultConcurrency
+)
+
+// BlobType defines values for BlobType
+type BlobType = generated.BlobType
+
+const (
+	BlobTypeBlockBlob  BlobType = generated.BlobTypeBlockBlob
+	BlobTypePageBlob   BlobType = generated.BlobTypePageBlob
+	BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob
+)
+
+// PossibleBlobTypeValues returns the possible values for the BlobType const type.
+func PossibleBlobTypeValues() []BlobType {
+	return generated.PossibleBlobTypeValues()
+}
+
+// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType
+type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType
+
+const (
+	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
+	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
+)
+
+// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
+	return generated.PossibleDeleteSnapshotsOptionTypeValues()
+}
+
+// AccessTier defines values for Blob Access Tier.
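+//
+// A usage sketch (blobClient is a hypothetical *Client pointing at a block blob in a
+// standard account; error handling elided):
+//
+//	_, err := blobClient.SetTier(context.TODO(), blob.AccessTierCool, nil)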
+type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type. +type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting + +const ( + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. 
+func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return generated.PossibleImmutabilityPolicySettingValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// EncryptionAlgorithmType defines values for EncryptionAlgorithmType. +type EncryptionAlgorithmType = generated.EncryptionAlgorithmType + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256 +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return generated.PossibleEncryptionAlgorithmTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus. +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// DeleteType defines values for DeleteType. +type DeleteType = generated.DeleteType + +const ( + DeleteTypeNone DeleteType = generated.DeleteTypeNone + DeleteTypePermanent DeleteType = generated.DeleteTypePermanent +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return generated.PossibleDeleteTypeValues() +} + +// QueryFormatType - The quick query format type. +type QueryFormatType = generated.QueryFormatType + +const ( + QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited + QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON + QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow + QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return generated.PossibleQueryFormatTypeValues() +} + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64 + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. 
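+//
+// A usage sketch (abClient and body are hypothetical; AppendBlockOptions in the
+// appendblob package accepts this value through its TransactionalValidation field):
+//
+//	_, err := abClient.AppendBlock(context.TODO(), body, &appendblob.AppendBlockOptions{
+//		TransactionalValidation: blob.TransferValidationTypeComputeCRC64(),
+//	})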
+func TransferValidationTypeComputeCRC64() TransferValidationType { + return exported.TransferValidationTypeComputeCRC64() +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 + +// SourceContentValidationType abstracts the various mechanisms used to validate source content. +// This interface is not publicly implementable. +type SourceContentValidationType interface { + Apply(generated.SourceContentSetter) + notPubliclyImplementable() +} + +// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64. +type SourceContentValidationTypeCRC64 []byte + +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64. +func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) { + src.SetSourceContentCRC64(s) +} + +func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {} + +var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil) + +// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5. +type SourceContentValidationTypeMD5 []byte + +// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5. +func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) { + src.SetSourceContentMD5(s) +} + +func (SourceContentValidationTypeMD5) notPubliclyImplementable() {} + +var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go new file mode 100644 index 00000000000..d7334688946 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -0,0 +1,580 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Type Declarations --------------------------------------------------------------------- + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// CPKInfo contains a group of parameters for client provided encryption key. 
+type CPKInfo = generated.CPKInfo + +// CPKScopeInfo contains a group of parameters for client provided encryption scope. +type CPKScopeInfo = generated.CPKScopeInfo + +// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type HTTPHeaders = generated.BlobHTTPHeaders + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions + +// Tags represent map of blob index tags +type Tags = generated.BlobTag + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset and zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.Download method. +type DownloadStreamOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientDownloadOptions{ + RangeGetContentMD5: o.RangeGetContentMD5, + Range: exported.FormatHTTPRange(o.Range), + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions. +type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. 
+ RetryReaderOptionsPerBlock RetryReaderOptions +} + +func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + } +} + +func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions { + if o == nil { + return nil + } + return &DownloadStreamOptions{ + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + Range: rnge, + RangeGetContentMD5: rangeGetContentMD5, + } +} + +// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // CPKInfo contains a group of parameters for client provided encryption key. + CPKInfo *CPKInfo + + // CPKScopeInfo contains a group of parameters for client provided encryption scope. + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// DownloadFileOptions contains the optional parameters for the DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + AccessConditions *AccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5. + Concurrency uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself. + DeleteSnapshots *DeleteSnapshotsOptionType + AccessConditions *AccessConditions + // Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs. + // WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed + // with caution. 
+ // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob + BlobDeleteType *DeleteType +} + +func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + } + + if o.AccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.AccessConditions.LeaseAccessConditions, o.AccessConditions.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UndeleteOptions contains the optional parameters for the Client.Undelete method. +type UndeleteOptions struct { + // placeholder for future options +} + +func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTierOptions contains the optional parameters for the Client.SetTier method. +type SetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + AccessConditions *AccessConditions +} + +func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method +type GetPropertiesOptions struct { + AccessConditions *AccessConditions + CPKInfo *CPKInfo +} + +func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, o.CPKInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. 
+type SetHTTPHeadersOptions struct { + AccessConditions *AccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions provides set of configurations for Set Metadata on blob operation +type SetMetadataOptions struct { + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo, + *CPKScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + Metadata map[string]*string + AccessConditions *AccessConditions + CPKInfo *CPKInfo + CPKScopeInfo *CPKScopeInfo +} + +func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + + return &generated.BlobClientCreateSnapshotOptions{ + Metadata: o.Metadata, + }, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]*string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Overrides the sealed state of the destination blob. 
Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + AccessConditions *AccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions, + *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := generated.BlobClientStartCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method. +type AbortCopyFromURLOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetTagsOptions contains the optional parameters for the Client.SetTags method. +type SetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AccessConditions *AccessConditions +} + +func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientSetTagsOptions{ + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetTagsOptions contains the optional parameters for the Client.GetTags method. +type GetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
+ VersionID *string + + BlobAccessConditions *AccessConditions +} + +func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &generated.BlobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, modifiedAccessConditions, leaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy +type SetImmutabilityPolicyOptions struct { + // Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked". + // "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked. + Mode *ImmutabilityPolicySetting + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { + if o == nil { + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil + } + ac := &exported.BlobAccessConditions{ + ModifiedAccessConditions: o.ModifiedAccessConditions, + } + _, modifiedAccessConditions := exported.FormatBlobAccessConditions(ac) + + options := &generated.BlobClientSetImmutabilityPolicyOptions{ + ImmutabilityPolicyMode: o.Mode, + } + + return options, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method. +type DeleteImmutabilityPolicyOptions struct { + // placeholder for future options +} + +func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method. +type SetLegalHoldOptions struct { + // placeholder for future options +} + +func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method. +type CopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTags map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo +} + +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlobClientCopyFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.BlobTags), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go new file mode 100644 index 00000000000..352d975264c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// DownloadResponse contains the response from method BlobClient.Download. +type DownloadResponse = generated.BlobClientDownloadResponse + +// DownloadStreamResponse contains the response from the DownloadStream method. 
+// To read from the stream, read from the Body field, or call the NewRetryReader method. +type DownloadStreamResponse struct { + DownloadResponse + ObjectReplicationRules []ObjectReplicationPolicy + + client *Client + getInfo httpGetterInfo + cpkInfo *CPKInfo + cpkScope *CPKScopeInfo +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DownloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + accessConditions := &AccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag}, + } + options := DownloadStreamOptions{ + Range: getInfo.Range, + AccessConditions: accessConditions, + CPKInfo: r.cpkInfo, + CPKScopeInfo: r.cpkScope, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// DeleteResponse contains the response from method BlobClient.Delete. +type DeleteResponse = generated.BlobClientDeleteResponse + +// UndeleteResponse contains the response from method BlobClient.Undelete. +type UndeleteResponse = generated.BlobClientUndeleteResponse + +// SetTierResponse contains the response from method BlobClient.SetTier. +type SetTierResponse = generated.BlobClientSetTierResponse + +// GetPropertiesResponse contains the response from method BlobClient.GetProperties. +type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse + +// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse + +// SetMetadataResponse contains the response from method BlobClient.SetMetadata. +type SetMetadataResponse = generated.BlobClientSetMetadataResponse + +// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse + +// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse + +// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse + +// SetTagsResponse contains the response from method BlobClient.SetTags. +type SetTagsResponse = generated.BlobClientSetTagsResponse + +// GetTagsResponse contains the response from method BlobClient.GetTags. +type GetTagsResponse = generated.BlobClientGetTagsResponse + +// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse + +// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicyResponse. +type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse + +// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. 
+type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse + +// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse + +// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse + +// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse + +// BreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse + +// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse + +// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse + +// RenewLeaseResponse contains the response from method BlobClient.RenewLease. +type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go similarity index 55% rename from vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index ad38f597ed2..1deedb5902e 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -1,52 +1,47 @@ -package azblob +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob import ( "context" "io" "net" - "net/http" "strings" "sync" -) -const CountToEnd = 0 + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. -type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // that should be used to make an HTTP GET request. -type HTTPGetterInfo struct { - // Offset specifies the start offset that should be used when - // creating the HTTP GET request's Range header - Offset int64 - - // Count specifies the count of bytes that should be used to calculate - // the end offset when creating the HTTP GET request's Range header - Count int64 +type httpGetterInfo struct { + Range HTTPRange // ETag specifies the resource's etag that should be used when creating // the HTTP GET request's If-Match header - ETag ETag + ETag *azcore.ETag } -// FailedReadNotifier is a function type that represents the notification function called when a read fails -type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) - -// RetryReaderOptions contains properties which can help to decide when to do retry. +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. 
 type RetryReaderOptions struct {
-	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
-	// while reading from a RetryReader. A value of zero means that no additional HTTP
-	// GET requests will be made.
-	MaxRetryRequests int
-	doInjectError      bool
-	doInjectErrorRound int
-	injectedError      error
+	// MaxRetries specifies the maximum number of attempts a failed read will be retried
+	// before producing an error.
+	// The default value is three.
+	MaxRetries int32
 
-	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
-	NotifyFailedRead FailedReadNotifier
+	// OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
+	OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)
 
-	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
 	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
 	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
 	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
@@ -55,51 +50,59 @@ type RetryReaderOptions struct {
 	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
 	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
 	// which will be retried.
-	TreatEarlyCloseAsError bool
+	// The default value is false.
+	EarlyCloseAsError bool
 
-	ClientProvidedKeyOptions ClientProvidedKeyOptions
+	doInjectError      bool
+	doInjectErrorRound int32
+	injectedError      error
 }
 
-// retryReader implements io.ReaderCloser methods.
-// retryReader tries to read from response, and if there is retriable network error
+// RetryReader attempts to read from response, and if there is a retry-able network error
 // returned during reading, it will retry according to retry reader option through executing
 // user defined action with provided data to get a new response, and continue the overall reading process
 // through reading from the new response.
-type retryReader struct {
-	ctx             context.Context
-	info            HTTPGetterInfo
-	countWasBounded bool
-	o               RetryReaderOptions
-	getter          HTTPGetter
+// RetryReader implements the io.ReadCloser interface.
+type RetryReader struct {
+	ctx                context.Context
+	info               httpGetterInfo
+	retryReaderOptions RetryReaderOptions
+	getter             httpGetter
+	countWasBounded    bool
 
 	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
 	responseMu *sync.Mutex
-	response   *http.Response
+	response   io.ReadCloser
 }
 
-// NewRetryReader creates a retry reader.
-func NewRetryReader(ctx context.Context, initialResponse *http.Response,
-	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
-	return &retryReader{
-		ctx:             ctx,
-		getter:          getter,
-		info:            info,
-		countWasBounded: info.Count != CountToEnd,
-		response:        initialResponse,
-		responseMu:      &sync.Mutex{},
-		o:               o}
+// newRetryReader creates a retry reader.
+func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } } -func (s *retryReader) setResponse(r *http.Response) { +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { s.responseMu.Lock() defer s.responseMu.Unlock() s.response = r } -func (s *retryReader) Read(p []byte) (n int, err error) { - for try := 0; ; try++ { +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { //fmt.Println(try) // Comment out for debugging. - if s.countWasBounded && s.info.Count == CountToEnd { + if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF } @@ -116,12 +119,12 @@ func (s *retryReader) Read(p []byte) (n int, err error) { s.setResponse(newResponse) resp = newResponse } - n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) // Injection mechanism for testing. - if s.o.doInjectError && try == s.o.doInjectErrorRound { - if s.o.injectedError != nil { - err = s.o.injectedError + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError } else { err = &net.DNSError{IsTemporary: true} } @@ -129,25 +132,26 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // We successfully read data or end EOF. if err == nil || err == io.EOF { - s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future - if s.info.Count != CountToEnd { - s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future } return n, err // Return the return to the caller } - s.Close() // Error, close stream + _ = s.Close() + s.setResponse(nil) // Our stream is no longer good // Check the retry count and error code, and decide whether to retry. 
- retriesExhausted := try >= s.o.MaxRetryRequests + retriesExhausted := try >= s.retryReaderOptions.MaxRetries _, isNetError := err.(net.Error) isUnexpectedEOF := err == io.ErrUnexpectedEOF willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted // Notify, for logging purposes, of any failures - if s.o.NotifyFailedRead != nil { + if s.retryReaderOptions.OnFailedRead != nil { failureCount := try + 1 // because try is zero-based - s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) + s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) } if willRetry { @@ -163,24 +167,26 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" // which is exactly the behaviour we want. // NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) -// then there are two different types of error that may happen - either the one one we check for here, +// then there are two different types of error that may happen - either the one we check for here, // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // to check for one, since the other is a net.Error, which our main Read retry loop is already handing. -func (s *retryReader) wasRetryableEarlyClose(err error) bool { - if s.o.TreatEarlyCloseAsError { +func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { return false // user wants all early closes to be errors, and so not retryable } // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) } +// ReadOnClosedBodyMessage of retry reader const ReadOnClosedBodyMessage = "read on closed response body" -func (s *retryReader) Close() error { +// Close retry reader +func (s *RetryReader) Close() error { s.responseMu.Lock() defer s.responseMu.Unlock() - if s.response != nil && s.response.Body != nil { - return s.response.Body.Close() + if s.response != nil { + return s.response.Close() } return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go new file mode 100644 index 00000000000..c2d517d8ad2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/utils.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blob + +import ( + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleID string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes. +type ObjectReplicationPolicy struct { + PolicyID *string + Rules *[]ObjectReplicationRules +} + +// deserializeORSPolicies is utility function to deserialize ORS Policies. 
+func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) {
+	if policies == nil {
+		return nil
+	}
+	// For source blobs (blobs that have policy ids and rule ids applied to them),
+	// the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+	// The value of this header is the status of the replication.
+	orPolicyStatusHeader := make(map[string]*string)
+	for key, value := range policies {
+		if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" {
+			orPolicyStatusHeader[key] = value
+		}
+	}
+
+	parsedResult := make(map[string][]ObjectReplicationRules)
+	for key, value := range orPolicyStatusHeader {
+		policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_")
+		policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1]
+
+		parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value})
+	}
+
+	for policyId, rules := range parsedResult {
+		objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{
+			PolicyID: &policyId,
+			Rules:    &rules,
+		})
+	}
+	return
+}
+
+// ParseHTTPHeaders parses GetPropertiesResponse and returns HTTPHeaders.
+func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders {
+	return HTTPHeaders{
+		BlobContentType:        resp.ContentType,
+		BlobContentEncoding:    resp.ContentEncoding,
+		BlobContentLanguage:    resp.ContentLanguage,
+		BlobContentDisposition: resp.ContentDisposition,
+		BlobCacheControl:       resp.CacheControl,
+		BlobContentMD5:         resp.ContentMD5,
+	}
+}
+
+// URLParts object represents the components that make up an Azure Storage Container/Blob URL.
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type URLParts = sas.URLParts
+
+// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
+func ParseURL(u string) (URLParts, error) {
+	return sas.ParseURL(u)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
new file mode 100644
index 00000000000..07fad60611b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
@@ -0,0 +1,159 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package bloberror
+
+import (
+	"errors"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+)
+
+// HasCode returns true if the provided error is an *azcore.ResponseError
+// with its ErrorCode field equal to one of the specified Codes.
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + BlobAlreadyExists Code = "BlobAlreadyExists" + BlobArchived Code = "BlobArchived" + BlobBeingRehydrated Code = "BlobBeingRehydrated" + BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy" + BlobNotArchived Code = "BlobNotArchived" + BlobNotFound Code = "BlobNotFound" + BlobOverwritten Code = "BlobOverwritten" + BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength" + BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption" + BlockCountExceedsLimit Code = "BlockCountExceedsLimit" + BlockListTooLong Code = "BlockListTooLong" + CannotChangeToLowerTier Code = "CannotChangeToLowerTier" + CannotVerifyCopySource Code = "CannotVerifyCopySource" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + ContainerAlreadyExists Code = "ContainerAlreadyExists" + ContainerBeingDeleted Code = "ContainerBeingDeleted" + ContainerDisabled Code = "ContainerDisabled" + ContainerNotFound Code = "ContainerNotFound" + ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit" + CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported" + CopyIDMismatch Code = "CopyIdMismatch" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" + IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" + IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" + InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidBlobOrBlock Code = "InvalidBlobOrBlock" + InvalidBlobTier Code = "InvalidBlobTier" + InvalidBlobType Code = "InvalidBlobType" + InvalidBlockID Code = "InvalidBlockId" + InvalidBlockList Code = "InvalidBlockList" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidOperation Code = "InvalidOperation" + InvalidPageRange Code = "InvalidPageRange" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = 
"InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + InvalidSourceBlobType Code = "InvalidSourceBlobType" + InvalidSourceBlobURL Code = "InvalidSourceBlobUrl" + InvalidURI Code = "InvalidUri" + InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + LeaseAlreadyBroken Code = "LeaseAlreadyBroken" + LeaseAlreadyPresent Code = "LeaseAlreadyPresent" + LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation" + LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation" + LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation" + LeaseIDMissing Code = "LeaseIdMissing" + LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired" + LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged" + LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed" + LeaseLost Code = "LeaseLost" + LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation" + LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation" + LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation" + MD5Mismatch Code = "Md5Mismatch" + CRC64Mismatch Code = "Crc64Mismatch" + MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + NoAuthenticationInformation Code = "NoAuthenticationInformation" + NoPendingCopyOperation Code = "NoPendingCopyOperation" + OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PendingCopyOperation Code = "PendingCopyOperation" + PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer" + PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound" + PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet" + SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge" + ServerBusy Code = "ServerBusy" + SnapshotCountExceeded Code = "SnapshotCountExceeded" + SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded" + SnapshotsPresent Code = "SnapshotsPresent" + SourceConditionNotMet Code = "SourceConditionNotMet" + SystemInUse Code = "SystemInUse" + TargetConditionNotMet Code = "TargetConditionNotMet" + UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + 
+var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. + MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go new file mode 100644 index 00000000000..24df42c75ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -0,0 +1,249 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "io" + "sync" + "sync/atomic" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error) + Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error) + CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { + options.setDefaults() + + wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing + errCh := make(chan error, 1) // contains the first error encountered during processing + + buffers := getBufferManager(options.Concurrency, options.BlockSize) + defer buffers.Free() + + // this controls the lifetime of the uploading goroutines. + // if an error is encountered, cancel() is called which will terminate all uploads. + // NOTE: the ordering is important here. cancel MUST execute before + // cleaning up the buffers so that any uploading goroutines exit first, + // releasing their buffers back to the pool for cleanup. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // all blocks have IDs that start with a random UUID + blockIDPrefix, err := uuid.New() + if err != nil { + return CommitBlockListResponse{}, err + } + tracker := blockTracker{ + blockIDPrefix: blockIDPrefix, + options: options, + } + + // This goroutine grabs a buffer, reads from the stream into the buffer, + // then creates a goroutine to upload/stage the block. 
+ for blockNum := uint32(0); true; blockNum++ { + var buffer T + select { + case buffer = <-buffers.Acquire(): + // got a buffer + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return CommitBlockListResponse{}, err + } + + // either grab the newly allocated buffer or wait for one to become available + buffer = <-buffers.Acquire() + } + + var n int + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) + + if n > 0 { + // some data was read, upload it + wg.Add(1) // We're posting a buffer to be sent + + // NOTE: we must pass blockNum as an arg to our goroutine else + // it's captured by reference and can change underneath us! + go func(blockNum uint32) { + // Upload the outgoing block, matching the number of bytes read + err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n]) + if err != nil { + select { + case errCh <- err: + // error was set + default: + // some other error is already set + } + cancel() + } + buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now + + // signal that the block has been staged. + // we MUST do this after attempting to write to errCh + // to avoid it racing with the reading goroutine. + wg.Done() + }(blockNum) + } else { + // nothing was read so the buffer is empty, send it back for reuse/clean-up. + buffers.Release(buffer) + } + + if err != nil { // The reader is done, no more outgoing buffers + if errors.Is(err, io.EOF) { + // these are expected errors, we don't surface those + err = nil + } else { + // some other error happened, terminate any outstanding uploads + cancel() + } + break + } + } + + wg.Wait() // Wait for all outgoing blocks to complete + + if err != nil { + // there was an error reading from src, favor this error over any error during staging + return CommitBlockListResponse{}, err + } + + select { + case err = <-errCh: + // there was an error during staging + return CommitBlockListResponse{}, err + default: + // no error was encountered + } + + // If no error, after all blocks uploaded, commit them to the blob & return the result + return tracker.commitBlocks(ctx, dst) +} + +// used to manage the uploading and committing of blocks +type blockTracker struct { + blockIDPrefix uuid.UUID // UUID used with all blockIDs + maxBlockNum uint32 // defaults to 0 + firstBlock []byte // Used only if maxBlockNum is 0 + options UploadStreamOptions +} + +func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error { + if num == 0 { + bt.firstBlock = buffer + + // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation + // If the payload is exactly the same size as the buffer, there may be more content coming in. + if len(buffer) < int(bt.options.BlockSize) { + return nil + } + } else { + // Else, upload a staged block... 
+		atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) {
+			// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
+			if startVal < num {
+				return num, 0
+			}
+			return startVal, 0
+		})
+	}
+
+	blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64()
+	_, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions())
+	return err
+}
+
+func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) {
+	// If the first block had the exact same size as the buffer
+	// we would have staged it as a block thinking that there might be more data coming
+	if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) {
+		// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
+		up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions())
+		if err != nil {
+			return CommitBlockListResponse{}, err
+		}
+
+		// convert UploadResponse to CommitBlockListResponse
+		return CommitBlockListResponse{
+			ClientRequestID:     up.ClientRequestID,
+			ContentMD5:          up.ContentMD5,
+			Date:                up.Date,
+			ETag:                up.ETag,
+			EncryptionKeySHA256: up.EncryptionKeySHA256,
+			EncryptionScope:     up.EncryptionScope,
+			IsServerEncrypted:   up.IsServerEncrypted,
+			LastModified:        up.LastModified,
+			RequestID:           up.RequestID,
+			Version:             up.Version,
+			VersionID:           up.VersionID,
+			//ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse
+		}, nil
+	}
+
+	// Multiple blocks staged, commit them all now
+	blockID := newUUIDBlockID(bt.blockIDPrefix)
+	blockIDs := make([]string, bt.maxBlockNum+1)
+	for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ {
+		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
+	}
+
+	return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions())
+}
+
+// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
+// The AtomicMorpher callback is passed a startValue and based on this value it returns
+// what the new value should be and the result that AtomicMorph should return to its caller.
+type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32)
+
+// AtomicMorph atomically morphs target into a new value (and result) as indicated by the AtomicMorpher callback function.
+func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 { + for { + currentVal := atomic.LoadUint32(target) + desiredVal, morphResult := morpher(currentVal) + if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { + return morphResult + } + } +} + +type blockID [64]byte + +func (blockID blockID) ToBase64() string { + return base64.StdEncoding.EncodeToString(blockID[:]) +} + +type uuidBlockID blockID + +func newUUIDBlockID(u uuid.UUID) uuidBlockID { + ubi := uuidBlockID{} // Create a new uuidBlockID + copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it + // Block number defaults to 0 + return ubi +} + +func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { + binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber) // Put block number after UUID + return ubi // Return the passed-in copy +} + +func (ubi uuidBlockID) ToBase64() string { + return blockID(ubi).ToBase64() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go new file mode 100644 index 00000000000..8901f1dbd51 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -0,0 +1,597 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "io" + "math" + "os" + "reflect" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client defines a set of operations applicable to block blobs. +type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. 
https://<storage-account>.blob.core.windows.net/container/blob.txt
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	audience := base.GetAudience((*base.ClientOptions)(options))
+	conOptions := shared.GetClientOptions(options)
+	authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a blob or with a shared access signature (SAS) token.
+// - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://<storage-account>.blob.core.windows.net/container/blob.txt
+// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - containerName - the name of the container within the storage account
+// - blobName - the name of the blob within the container
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (bb *Client) sharedKey() *blob.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+}
+
+func (bb *Client) generated() *generated.BlockBlobClient {
+	_, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+	return blockBlob
+}
+
+func (bb *Client) innerBlobGenerated() *generated.BlobClient {
+	b := bb.BlobClient()
+	return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (bb *Client) URL() string {
+	return bb.generated().Endpoint()
+}
+
+// BlobClient returns the embedded blob client for this BlockBlob client.
+func (bb *Client) BlobClient() *blob.Client {
+	blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
+	return (*blob.Client)(blobClient)
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (bb *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := blob.ParseURL(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version id.
+// Pass "" to remove the versionID returning a URL to the base blob.
+func (bb *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := blob.ParseURL(bb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil
+}
+
+// Upload creates a new block blob or overwrites an existing block blob.
+// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
+// supported with Upload; the content of the existing blob is overwritten with the new content. To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method returns an error if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
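As a usage sketch for the whole-blob path documented above: the account URL is a placeholder, the azidentity module is assumed to be available, and errors are handled by panicking only to keep the example short.

package main

import (
	"context"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// The URL below is a placeholder; substitute a real account, container, and blob.
	client, err := blockblob.NewClient("https://<storage-account>.blob.core.windows.net/container/blob.txt", cred, nil)
	if err != nil {
		panic(err)
	}
	f, err := os.Open("blob.txt") // *os.File satisfies io.ReadSeekCloser and starts at position 0
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := client.Upload(context.TODO(), f, nil); err != nil {
		panic(err)
	}
}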
+func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return UploadResponse{}, err
+	}
+
+	opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
+
+	if options != nil && options.TransactionalValidation != nil {
+		body, err = options.TransactionalValidation.Apply(body, opts)
+		if err != nil {
+			return UploadResponse{}, err
+		}
+	}
+
+	resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
+	return resp, err
+}
+
+// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
+// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten
+// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put
+// Block from URL API in conjunction with Put Block List.
+// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url
+func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) {
+	opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format()
+
+	resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions)
+
+	return resp, err
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return StageBlockResponse{}, err
+	}
+
+	opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
+
+	if options != nil && options.TransactionalValidation != nil {
+		body, err = options.TransactionalValidation.Apply(body, opts)
+		if err != nil {
+			return StageBlockResponse{}, err
+		}
+	}
+
+	resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo)
+	return resp, err
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
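The stage-then-commit flow can be sketched as follows, assuming a *blockblob.Client named client, a context.Context named ctx, and imports of bytes, encoding/base64, and azcore/streaming. The block contents and IDs are illustrative; the service requires all base64-decoded block IDs to have equal length.

// Stage two blocks, then commit them in order to form the readable blob.
ids := []string{
	base64.StdEncoding.EncodeToString([]byte("block-0000")),
	base64.StdEncoding.EncodeToString([]byte("block-0001")),
}
for i, chunk := range [][]byte{[]byte("hello, "), []byte("world")} {
	// Stage each chunk under its pre-computed block ID.
	if _, err := client.StageBlock(ctx, ids[i], streaming.NopCloser(bytes.NewReader(chunk)), nil); err != nil {
		panic(err)
	}
}
// Commit the staged blocks; the blob becomes readable only after this call.
if _, err := client.CommitBlockList(ctx, ids, nil); err != nil {
	panic(err)
}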
+func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) {
+
+	stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
+
+	resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions,
+		cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+
+	return resp, err
+}
+
+// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
+func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) {
+	// this is a code smell in the generated code
+	blockIds := make([]*string, len(base64BlockIDs))
+	for k, v := range base64BlockIDs {
+		blockIds[k] = to.Ptr(v)
+	}
+
+	blockLookupList := generated.BlockLookupList{Latest: blockIds}
+
+	var commitOptions *generated.BlockBlobClientCommitBlockListOptions
+	var headers *generated.BlobHTTPHeaders
+	var leaseAccess *blob.LeaseAccessConditions
+	var cpkInfo *generated.CPKInfo
+	var cpkScope *generated.CPKScopeInfo
+	var modifiedAccess *generated.ModifiedAccessConditions
+
+	if options != nil {
+		commitOptions = &generated.BlockBlobClientCommitBlockListOptions{
+			BlobTagsString:            shared.SerializeBlobTagsToStrPtr(options.Tags),
+			Metadata:                  options.Metadata,
+			RequestID:                 options.RequestID,
+			Tier:                      options.Tier,
+			Timeout:                   options.Timeout,
+			TransactionalContentCRC64: options.TransactionalContentCRC64,
+			TransactionalContentMD5:   options.TransactionalContentMD5,
+			LegalHold:                 options.LegalHold,
+			ImmutabilityPolicyMode:    options.ImmutabilityPolicyMode,
+			ImmutabilityPolicyExpiry:  options.ImmutabilityPolicyExpiryTime,
+		}
+
+		// If user attempts to pass in their own checksum, errors out.
+		if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil {
+			return CommitBlockListResponse{}, bloberror.UnsupportedChecksum
+		}
+
+		headers = options.HTTPHeaders
+		leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions)
+		cpkInfo = options.CPKInfo
+		cpkScope = options.CPKScopeInfo
+	}
+
+	resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
+	return resp, err
+}
+
+// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
+func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) {
+	o, lac, mac := options.format()
+
+	resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac)
+
+	return resp, err
+}
+
+// Redeclared APIs ----- Copy over to Append blob and Page blob as well.
+
+// Delete marks the specified blob or snapshot for deletion.
The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return bb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return bb.BlobClient().Undelete(ctx, o) +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return bb.BlobClient().SetLegalHold(ctx, legalHold, options) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return bb.BlobClient().SetTier(ctx, tier, o) +} + +// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry +func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) { + if expiryType == nil { + expiryType = ExpiryTypeNever{} + } + et, opts := expiryType.Format(o) + resp, err := bb.innerBlobGenerated().SetExpiry(ctx, et, opts) + return resp, err +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
+func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return bb.BlobClient().GetProperties(ctx, o) +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return bb.BlobClient().GetAccountInfo(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return bb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return bb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return bb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return bb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return bb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
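The redeclared helpers compose with one another; a hedged sketch, reusing the client and ctx assumed above (the metadata key is arbitrary, to.Ptr is from azcore/to, and fmt is assumed imported):

// Set user metadata on the blob, then read it back via GetProperties.
if _, err := client.SetMetadata(ctx, map[string]*string{"env": to.Ptr("dev")}, nil); err != nil {
	panic(err)
}
props, err := client.GetProperties(ctx, nil)
if err != nil {
	panic(err)
}
for k, v := range props.Metadata {
	fmt.Println(k, "=", *v) // key casing may be normalized by the service
}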
+func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return bb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + +// Concurrent Upload Functions ----------------------------------------------------------------------------------------- + +// uploadFromReader uploads a buffer in blocks to a block blob. +func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { + if o.BlockSize == 0 { + // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error + if actualSize > MaxStageBlockBytes*MaxBlocks { + return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if actualSize <= MaxUploadBlobBytes { + o.BlockSize = MaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = blob.DefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). + } + } + + if actualSize <= MaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions) + + return toUploadReaderAtResponseFromUploadResponse(resp), err + } + + var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) + if numBlocks > MaxBlocks { + // prevent any math bugs from attempting to upload too many blocks which will always fail + return uploadFromReaderResponse{}, errors.New("block limit exceeded") + } + + if log.Should(exported.EventUpload) { + urlparts, err := blob.ParseURL(bb.generated().Endpoint()) + if err == nil { + log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v", + urlparts.BlobName, actualSize, o.BlockSize, numBlocks) + } + } + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "uploadFromReader", + TransferSize: actualSize, + ChunkSize: o.BlockSize, + NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, offset int64, chunkSize int64) error { + // This function is called once per block. + // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + if chunkSize < o.BlockSize { + // this is the last block. 
its actual size might be less + // than the calculated size due to rounding up of the payload + // size to fit in a whole number of blocks. + chunkSize = (actualSize - offset) + } + var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(shared.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + generatedUuid, err := uuid.New() + if err != nil { + return err + } + blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) + stageBlockOptions := o.getStageBlockOptions() + _, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions) + return err + }, + }) + if err != nil { + return uploadFromReaderResponse{}, err + } + // All put blocks were successful, call Put Block List to finalize the blob + commitBlockListOptions := o.getCommitBlockListOptions() + resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) + + return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) { + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) +} + +// UploadFile uploads a file in blocks to a block blob. +func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) { + stat, err := file.Stat() + if err != nil { + return uploadFromReaderResponse{}, err + } + uploadOptions := uploadFromReaderOptions{} + if o != nil { + uploadOptions = *o + } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) +} + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) { + if o == nil { + o = &UploadStreamOptions{} + } + + // If user attempts to pass in their own checksum, errors out. 
+	if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func {
+		return UploadStreamResponse{}, bloberror.UnsupportedChecksum
+	}
+
+	result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool)
+	if err != nil {
+		return CommitBlockListResponse{}, err
+	}
+
+	return result, nil
+}
+
+// Concurrent Download Functions -----------------------------------------------------------------------------------------
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
+	return bb.BlobClient().DownloadStream(ctx, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
+	return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match.
+func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
+	return bb.BlobClient().DownloadFile(ctx, file, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go
new file mode 100644
index 00000000000..ce3a5d8de3f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go
@@ -0,0 +1,52 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package blockblob
+
+import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+
+const (
+	// CountToEnd specifies the end of the file.
+	CountToEnd = 0
+
+	_1MiB = 1024 * 1024
+
+	// MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
+	MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
+
+	// MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
+	MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
+
+	// MaxBlocks indicates the maximum number of blocks allowed in a block blob.
+	MaxBlocks = 50000
+)
+
+// BlockListType defines values for BlockListType
+type BlockListType = generated.BlockListType
+
+const (
+	BlockListTypeCommitted   BlockListType = generated.BlockListTypeCommitted
+	BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted
+	BlockListTypeAll         BlockListType = generated.BlockListTypeAll
+)
+
+// PossibleBlockListTypeValues returns the possible values for the BlockListType const type.
+func PossibleBlockListTypeValues() []BlockListType {
+	return generated.PossibleBlockListTypeValues()
+}
+
+// BlobCopySourceTags - can be 'COPY' or 'REPLACE'
+type BlobCopySourceTags = generated.BlobCopySourceTags
+
+const (
+	BlobCopySourceTagsCopy    = generated.BlobCopySourceTagsCOPY
+	BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE
+)
+
+// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type.
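Taken together, the limits declared in this file bound the largest block blob the client can write; a back-of-the-envelope check, illustrative only:

// Illustrative only: the maximum block-blob size implied by these limits.
const maxBlockBlobBytes = int64(MaxStageBlockBytes) * MaxBlocks // 209,715,200,000,000 bytes ≈ 190.7 TiB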
+func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go new file mode 100644 index 00000000000..453d569e5d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -0,0 +1,411 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// Type Declarations --------------------------------------------------------------------- + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block = generated.Block + +// BlockList - can be uncommitted or committed blocks (committed/uncommitted) +type BlockList = generated.BlockList + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// UploadOptions contains the optional parameters for the Client.Upload method. +type UploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType + + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte +} + +func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := generated.BlockBlobClientUploadOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + LegalHold: o.LegalHold, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. 
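A hedged sketch of populating the UploadOptions defined above: the tag and metadata values are placeholders, the Cool tier is only an example, and client, ctx, and body are as assumed earlier.

opts := &blockblob.UploadOptions{
	Tags:     map[string]string{"project": "demo"}, // placeholder tag
	Metadata: map[string]*string{"owner": to.Ptr("team-a")},
	Tier:     to.Ptr(blob.AccessTierCool),
}
_, err := client.Upload(ctx, body, opts)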
+type UploadBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Additional optional headers + HTTPHeaders *blob.HTTPHeaders + AccessConditions *blob.AccessConditions + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions +} + +func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, + *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := generated.BlockBlobClientPutBlobFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + CopySourceAuthorization: o.CopySourceAuthorization, + CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +type StageBlockOptions struct { + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + LeaseAccessConditions *blob.LeaseAccessConditions + + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType +} + +// StageBlockOptions contains the optional parameters for the Client.StageBlock method. +func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method. +type StageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + LeaseAccessConditions *blob.LeaseAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. + SourceContentValidation blob.SourceContentValidationType + + // Range specifies a range of bytes. The default value is all bytes. + Range blob.HTTPRange + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo +} + +func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &generated.BlockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceRange: exported.FormatHTTPRange(o.Range), + } + + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) + } + + return options, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method. +type CommitBlockListOptions struct { + Tags map[string]string + Metadata map[string]*string + RequestID *string + Tier *blob.AccessTier + Timeout *int32 + HTTPHeaders *blob.HTTPHeaders + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + AccessConditions *blob.AccessConditions + LegalHold *bool + ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting + ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated + TransactionalContentCRC64 []byte + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method. +type GetBlockListOptions struct { + Snapshot *string + AccessConditions *blob.AccessConditions +} + +func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// ------------------------------------------------------------ + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *blob.HTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. 
+ Metadata map[string]*string + + // AccessConditions indicates the access conditions for the block blob. + AccessConditions *blob.AccessConditions + + // AccessTier indicates the tier of blob + AccessTier *blob.AccessTier + + // BlobTags + Tags map[string]string + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + + // Concurrency indicates the maximum number of blocks to upload in parallel (0=default) + Concurrency uint16 + + TransactionalValidation blob.TransferValidationType + + // Deprecated: TransactionalContentCRC64 cannot be generated at block level + TransactionalContentCRC64 uint64 + + // Deprecated: TransactionalContentMD5 cannot be generated at block level + TransactionalContentMD5 []byte +} + +// UploadBufferOptions provides set of configurations for UploadBuffer operation. +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for UploadFile operation. +type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions { + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(o.AccessConditions) + return &StageBlockOptions{ + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + + TransactionalValidation: o.TransactionalValidation, + } +} + +func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions { + return &UploadOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + AccessConditions: o.AccessConditions, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + } +} + +func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions { + return &CommitBlockListOptions{ + Tags: o.Tags, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + CPKInfo: o.CPKInfo, + CPKScopeInfo: o.CPKScopeInfo, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for UploadStream operation. +type UploadStreamOptions struct { + // BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + BlockSize int64 + + // Concurrency defines the max number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size BlockSize. The default value is one. 
+ Concurrency int + + TransactionalValidation blob.TransferValidationType + + HTTPHeaders *blob.HTTPHeaders + Metadata map[string]*string + AccessConditions *blob.AccessConditions + AccessTier *blob.AccessTier + Tags map[string]string + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo +} + +func (u *UploadStreamOptions) setDefaults() { + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.BlockSize < _1MiB { + u.BlockSize = _1MiB + } +} + +func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions { + if u == nil { + return nil + } + + leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions) + return &StageBlockOptions{ + TransactionalValidation: u.TransactionalValidation, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions { + if u == nil { + return nil + } + + return &CommitBlockListOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + AccessConditions: u.AccessConditions, + } +} + +func (u *UploadStreamOptions) getUploadOptions() *UploadOptions { + if u == nil { + return nil + } + + return &UploadOptions{ + Tags: u.Tags, + Metadata: u.Metadata, + Tier: u.AccessTier, + HTTPHeaders: u.HTTPHeaders, + CPKInfo: u.CPKInfo, + CPKScopeInfo: u.CPKScopeInfo, + AccessConditions: u.AccessConditions, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ExpiryType defines values for ExpiryType. +type ExpiryType = exported.ExpiryType + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry. +type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry. +type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry. +type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation + +// ExpiryTypeNever defines that the blob will be set to never expire. +type ExpiryTypeNever = exported.ExpiryTypeNever + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions = exported.SetExpiryOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go new file mode 100644 index 00000000000..917f7180977 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -0,0 +1,117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package blockblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// UploadResponse contains the response from method Client.Upload. +type UploadResponse = generated.BlockBlobClientUploadResponse + +// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL +type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse + +// StageBlockResponse contains the response from method Client.StageBlock. 
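Given setDefaults above (at least one buffer of at least 1 MiB), tuning the streaming path might look like the following sketch; client and ctx are as assumed earlier and r is any io.Reader.

// Stream r to the blob using four 4 MiB buffers in flight at once.
opts := &blockblob.UploadStreamOptions{
	BlockSize:   4 * 1024 * 1024,
	Concurrency: 4,
}
if _, err := client.UploadStream(ctx, r, opts); err != nil {
	panic(err)
}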
+type StageBlockResponse = generated.BlockBlobClientStageBlockResponse + +// CommitBlockListResponse contains the response from method Client.CommitBlockList. +type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse + +// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL. +type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse + +// GetBlockListResponse contains the response from method Client.GetBlockList. +type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse + +// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type uploadFromReaderResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + // Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB) + ContentCRC64 []byte +} + +func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + } +} + +func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse { + return uploadFromReaderResponse{ + ClientRequestID: resp.ClientRequestID, + ContentMD5: resp.ContentMD5, + Date: resp.Date, + ETag: resp.ETag, + EncryptionKeySHA256: resp.EncryptionKeySHA256, + EncryptionScope: resp.EncryptionScope, + IsServerEncrypted: resp.IsServerEncrypted, + LastModified: resp.LastModified, + RequestID: resp.RequestID, + Version: resp.Version, + VersionID: resp.VersionID, + ContentCRC64: resp.ContentCRC64, + } +} + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. 
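Because both upload paths are normalized into uploadFromReaderResponse, callers can inspect the same fields either way; a hedged sketch (client, ctx, and a []byte named data are assumed, with fmt imported):

resp, err := client.UploadBuffer(ctx, data, nil)
if err != nil {
	panic(err)
}
if resp.ETag != nil && resp.LastModified != nil {
	fmt.Println("uploaded:", *resp.ETag, resp.LastModified.UTC())
}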
+type UploadFileResponse = uploadFromReaderResponse
+
+// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
+type UploadBufferResponse = uploadFromReaderResponse
+
+// UploadStreamResponse contains the response from method Client.CommitBlockList.
+type UploadStreamResponse = CommitBlockListResponse
+
+// SetExpiryResponse contains the response from method Client.SetExpiry.
+type SetExpiryResponse = generated.BlobClientSetExpiryResponse
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
new file mode 100644
index 00000000000..2259336b2bd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
@@ -0,0 +1,34 @@
+trigger:
+  branches:
+    include:
+      - main
+      - feature/*
+      - hotfix/*
+      - release/*
+  paths:
+    include:
+      - sdk/storage/azblob
+
+pr:
+  branches:
+    include:
+      - main
+      - feature/*
+      - hotfix/*
+      - release/*
+  paths:
+    include:
+      - sdk/storage/azblob
+
+
+extends:
+  template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+  parameters:
+    ServiceDirectory: 'storage/azblob'
+    RunLiveTests: true
+    UsePipelineProxy: false
+    EnvVars:
+      AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID)
+      AZURE_TENANT_ID: $(AZBLOB_TENANT_ID)
+      AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET)
+      AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
new file mode 100644
index 00000000000..c511d8a79f2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
@@ -0,0 +1,164 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package azblob
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+)
+
+// ClientOptions contains the optional parameters when creating a Client.
+type ClientOptions base.ClientOptions
+
+// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
+type Client struct {
+	svc *service.Client
+}
+
+// NewClient creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<storage-account>.blob.core.windows.net/
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
+// - serviceURL - the URL of the storage account e.g. https://<storage-account>.blob.core.windows.net/?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
+	svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<storage-account>.blob.core.windows.net/
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
+	svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		svc: svcClient,
+	}, nil
+}
+
+// URL returns the URL endpoint used by the BlobClient object.
+func (c *Client) URL() string {
+	return c.svc.URL()
+}
+
+// ServiceClient returns the embedded service client for this client.
+func (c *Client) ServiceClient() *service.Client {
+	return c.svc
+}
+
+// CreateContainer is a lifecycle method to create a new container under the specified account.
+// If the container with the same name already exists, a ResourceExistsError will be raised.
+// This method returns a client with which to interact with the newly created container.
+func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) {
+	return c.svc.CreateContainer(ctx, containerName, o)
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) {
+	return c.svc.DeleteContainer(ctx, containerName, o)
+}
+
+// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o)
+}
+
+// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
+	return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o)
+}
+
+// NewListContainersPager operation returns a pager of the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
+	return c.svc.NewListContainersPager(o)
+}
+
+// UploadBuffer uploads a buffer in blocks to a block blob.
+func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o)
+}
+
+// UploadFile uploads a file in blocks to a block blob.
+func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o)
+}
+
+// UploadStream copies the data held in the io.Reader to the specified block blob.
+// A Context deadline or cancellation will cause this to error.
+func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
+	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
+func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) {
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o)
+}
+
+// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
+	o = shared.CopyOptions(o)
+	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
new file mode 100644
index 00000000000..48771e8c9c2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
@@ -0,0 +1,36 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset and zero value count indicates from the offset to the resource's end. +type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go new file mode 100644 index 00000000000..c42fcdec7f9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// PublicAccessType defines values for AccessType - private (default) or blob or container. +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType. +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. 
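Stepping back before the container-level APIs: the account-level convenience methods in client.go above (UploadStream, DownloadBuffer, and friends) cover simple cases without touching the sub-packages. A minimal round-trip sketch, assuming a valid connection string (the container and blob names here are hypothetical):

	package main

	import (
		"context"
		"fmt"
		"log"
		"strings"

		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	)

	func main() {
		// Hypothetical connection string and names; substitute real values.
		client, err := azblob.NewClientFromConnectionString("<connection string>", nil)
		if err != nil {
			log.Fatal(err)
		}
		ctx := context.TODO()

		// Stream an upload, then pull the same bytes back into a buffer.
		_, err = client.UploadStream(ctx, "democontainer", "hello.txt", strings.NewReader("hello"), nil)
		if err != nil {
			log.Fatal(err)
		}
		buf := make([]byte, 5)
		n, err := client.DownloadBuffer(ctx, "democontainer", "hello.txt", buf, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(n, string(buf))
	}

Each call resolves a fresh container/blob client internally, so this surface trades a little allocation overhead for a much smaller API.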
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
+	return generated.PossibleDeleteSnapshotsOptionTypeValues()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go
new file mode 100644
index 00000000000..83edea72b2f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go
@@ -0,0 +1,94 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package container
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests.
+// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier.
+type BatchBuilder struct {
+	endpoint      string
+	authPolicy    policy.Policy
+	subRequests   []*policy.Request
+	operationType *exported.BlobBatchOperationType
+}
+
+func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error {
+	if bb.operationType == nil {
+		bb.operationType = &operationType
+		return nil
+	}
+	if *bb.operationType != operationType {
+		return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType)
+	}
+	return nil
+}
+
+// Delete operation is used to add a delete sub-request to the batch builder.
+func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error {
+	err := bb.checkOperationType(exported.BatchDeleteOperationType)
+	if err != nil {
+		return err
+	}
+
+	blobName = url.PathEscape(blobName)
+	blobURL := runtime.JoinPaths(bb.endpoint, blobName)
+
+	blobClient, err := blob.NewClientWithNoCredential(blobURL, nil)
+	if err != nil {
+		return err
+	}
+
+	deleteOptions, leaseInfo, accessConditions := options.format()
+	req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions)
+	if err != nil {
+		return err
+	}
+
+	// remove x-ms-version header
+	exported.UpdateSubRequestHeaders(req)
+
+	bb.subRequests = append(bb.subRequests, req)
+	return nil
+}
+
+// SetTier operation is used to add a set tier sub-request to the batch builder.
+func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go new file mode 100644 index 00000000000..0e43ed015e8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -0,0 +1,437 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type Client base.Client[generated.ContainerClient] + +// NewClient creates an instance of Client with the specified values. +// - containerURL - the URL of the container e.g. 
https://<account>.blob.core.windows.net/container
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+	audience := base.GetAudience((*base.ClientOptions)(options))
+	conOptions := shared.GetClientOptions(options)
+	authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a container or with a shared access signature (SAS) token.
+// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - containerURL - the URL of the container e.g. https://<account>.blob.core.windows.net/container
+// - cred - a SharedKeyCredential created with the matching container's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account +// - containerName - the name of the container within the storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (c *Client) generated() *generated.ContainerClient { + return base.InnerClient((*base.Client[generated.ContainerClient])(c)) +} + +func (c *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ContainerClient])(c)) +} + +func (c *Client) credential() any { + return base.Credential((*base.Client[generated.ContainerClient])(c)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + +func (c *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) +} + +// URL returns the URL endpoint used by the Client object. +func (c *Client) URL() string { + return c.generated().Endpoint() +} + +// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of +// Client's URL. The blob name will be URL-encoded. +// The new blob.Client uses the same request policy pipeline as this Client. +func (c *Client) NewBlobClient(blobName string) *blob.Client { + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) +} + +// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of +// this Client's URL. The blob name will be URL-encoded. +// The new appendblob.Client uses the same request policy pipeline as this Client. +func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) +} + +// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of +// this Client's URL. The blob name will be URL-encoded. +// The new blockblob.Client uses the same request policy pipeline as this Client. +func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(c.URL(), blobName) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) +} + +// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of +// this Client's URL. 
The blob name will be URL-encoded.
+// The new pageblob.Client uses the same request policy pipeline as this Client.
+func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
+	blobName = url.PathEscape(blobName)
+	blobURL := runtime.JoinPaths(c.URL(), blobName)
+	return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey()))
+}
+
+// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
+func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) {
+	var opts *generated.ContainerClientCreateOptions
+	var cpkScopes *generated.ContainerCPKScopeInfo
+	if options != nil {
+		opts = &generated.ContainerClientCreateOptions{
+			Access:   options.Access,
+			Metadata: options.Metadata,
+		}
+		cpkScopes = options.CPKScopeInfo
+	}
+	resp, err := c.generated().Create(ctx, opts, cpkScopes)
+
+	return resp, err
+}
+
+// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
+func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) {
+	opts, leaseAccessConditions, modifiedAccessConditions := options.format()
+	resp, err := c.generated().Delete(ctx, opts, leaseAccessConditions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// Restore operation restores the contents and properties of a soft deleted container to a specified container.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container.
+func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) {
+	urlParts, err := blob.ParseURL(c.URL())
+	if err != nil {
+		return RestoreResponse{}, err
+	}
+
+	opts := &generated.ContainerClientRestoreOptions{
+		DeletedContainerName:    &urlParts.ContainerName,
+		DeletedContainerVersion: &deletedContainerVersion,
+	}
+	resp, err := c.generated().Restore(ctx, opts)
+
+	return resp, err
+}
+
+// GetProperties returns the container's properties.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
+func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
+	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
+	// This allows us to not expose a GetMetadata method at all simplifying the API.
+	// The optionals are nil, like they were in track 1.5
+	opts, leaseAccessConditions := o.format()
+
+	resp, err := c.generated().GetProperties(ctx, opts, leaseAccessConditions)
+	return resp, err
+}
+
+// SetMetadata sets the container's metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
+func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) {
+	metadataOptions, lac, mac := o.format()
+	resp, err := c.generated().SetMetadata(ctx, metadataOptions, lac, mac)
+
+	return resp, err
+}
+
+// GetAccessPolicy returns the container's access policy. The access policy indicates whether the container's blobs may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
+func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) {
+	options, ac := o.format()
+	resp, err := c.generated().GetAccessPolicy(ctx, options, ac)
+	return resp, err
+}
+
+// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
+func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) {
+	accessPolicy, mac, lac, acl, err := o.format()
+	if err != nil {
+		return SetAccessPolicyResponse{}, err
+	}
+	resp, err := c.generated().SetAccessPolicy(ctx, acl, accessPolicy, mac, lac)
+	return resp, err
+}
+
+// GetAccountInfo provides account level information.
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
+func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
+	getAccountInfoOptions := o.format()
+	resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions)
+	return resp, err
+}
+
+// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
+	listOptions := generated.ContainerClientListBlobFlatSegmentOptions{}
+	if o != nil {
+		listOptions.Include = o.Include.format()
+		listOptions.Marker = o.Marker
+		listOptions.Maxresults = o.MaxResults
+		listOptions.Prefix = o.Prefix
+	}
+	return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{
+		More: func(page ListBlobsFlatResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions)
+			} else {
+				listOptions.Marker = page.NextMarker
+				req, err = c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions)
+			}
+			if err != nil {
+				return ListBlobsFlatResponse{}, err
+			}
+			resp, err := c.generated().InternalClient().Pipeline().Do(req)
+			if err != nil {
+				return ListBlobsFlatResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				// TODO: storage error?
+				return ListBlobsFlatResponse{}, runtime.NewResponseError(resp)
+			}
+			return c.generated().ListBlobFlatSegmentHandleResponse(resp)
+		},
+	})
+}
+
+// NewListBlobsHierarchyPager returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
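The flat pager above drives ListBlobFlatSegmentCreateRequest until NextMarker comes back empty; the hierarchy pager implemented next follows the same pattern. A minimal consumption sketch, assuming an already-constructed *container.Client (variable names are illustrative only):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// listAll drains the flat pager. MaxResults caps the page size, not the
	// total; the pager keeps fetching while NextMarker is non-empty.
	func listAll(client *container.Client) {
		pager := client.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
			MaxResults: to.Ptr(int32(100)),
		})
		for pager.More() {
			page, err := pager.NextPage(context.TODO())
			if err != nil {
				log.Fatal(err)
			}
			for _, item := range page.Segment.BlobItems {
				fmt.Println(*item.Name)
			}
		}
	}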
+func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] { + listOptions := o.format() + return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{ + More: func(page ListBlobsHierarchyResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions) + } + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + resp, err := c.generated().InternalClient().Pipeline().Do(req) + if err != nil { + return ListBlobsHierarchyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp) + } + return c.generated().ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if c.sharedKey() == nil { + return "", bloberror.MissingSharedKeyCredential + } + st := o.format() + urlParts, err := blob.ParseURL(c.URL()) + if err != nil { + return "", err + } + // Containers do not have snapshots, nor versions. + qps, err := sas.BlobSignatureValues{ + Version: sas.Version, + ContainerName: urlParts.ContainerName, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(c.sharedKey()) + if err != nil { + return "", err + } + + endpoint := c.URL() + "?" + qps.Encode() + + return endpoint, nil +} + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := c.credential().(type) { + case *azcore.TokenCredential: + conOptions := c.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: c.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
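SubmitBatch, implemented below, pairs with NewBatchBuilder: sub-requests are accumulated client-side and sent as one multipart request. A hedged sketch of a batched delete, assuming an authenticated *container.Client and illustrative blob names:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// batchDelete deletes several blobs in one round trip.
	func batchDelete(client *container.Client, blobNames []string) {
		bb, err := client.NewBatchBuilder()
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range blobNames {
			// A builder accepts only one kind of sub-request; mixing Delete
			// and SetTier would fail the checkOperationType guard above.
			if err := bb.Delete(name, nil); err != nil {
				log.Fatal(err)
			}
		}
		resp, err := client.SubmitBatch(context.TODO(), bb, nil)
		if err != nil {
			log.Fatal(err)
		}
		// Per-blob outcomes arrive in the Responses slice, not the top-level error.
		for _, sub := range resp.Responses {
			if sub.Error != nil && sub.BlobName != nil {
				fmt.Printf("delete of %s failed: %v\n", *sub.BlobName, sub.Error)
			}
		}
	}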
+func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} + +// FilterBlobs operation finds all blobs in the container whose tags match a given search expression. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + containerClientFilterBlobsOptions := o.format() + resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go new file mode 100644 index 00000000000..09a8e8ed3f2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/constants.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +// AccessTier defines values for blob access tiers. +type AccessTier = generated.AccessTier + +const ( + AccessTierArchive AccessTier = generated.AccessTierArchive + AccessTierCool AccessTier = generated.AccessTierCool + AccessTierHot AccessTier = generated.AccessTierHot + AccessTierP10 AccessTier = generated.AccessTierP10 + AccessTierP15 AccessTier = generated.AccessTierP15 + AccessTierP20 AccessTier = generated.AccessTierP20 + AccessTierP30 AccessTier = generated.AccessTierP30 + AccessTierP4 AccessTier = generated.AccessTierP4 + AccessTierP40 AccessTier = generated.AccessTierP40 + AccessTierP50 AccessTier = generated.AccessTierP50 + AccessTierP6 AccessTier = generated.AccessTierP6 + AccessTierP60 AccessTier = generated.AccessTierP60 + AccessTierP70 AccessTier = generated.AccessTierP70 + AccessTierP80 AccessTier = generated.AccessTierP80 + AccessTierPremium AccessTier = generated.AccessTierPremium +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleAccessTierValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container. 
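The PublicAccessType constants re-exported below determine whether a container's blobs are anonymously readable. A small sketch of creating a publicly readable container, assuming client is a ready *container.Client:

	package main

	import (
		"context"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// createPublicContainer creates a container whose blobs (but not its
	// listing) are anonymously readable.
	func createPublicContainer(client *container.Client) {
		_, err := client.Create(context.TODO(), &container.CreateOptions{
			Access: to.Ptr(container.PublicAccessTypeBlob),
		})
		if err != nil {
			log.Fatal(err)
		}
	}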
+type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS. +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobType defines values for BlobType +type BlobType = generated.BlobType + +const ( + BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob + BlobTypePageBlob BlobType = generated.BlobTypePageBlob + BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return generated.PossibleBlobTypeValues() +} + +// ArchiveStatus defines values for ArchiveStatus +type ArchiveStatus = generated.ArchiveStatus + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool + ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return generated.PossibleArchiveStatusValues() +} + +// CopyStatusType defines values for CopyStatusType +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. 
+func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode +type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode + +const ( + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. +func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return generated.PossibleImmutabilityPolicyModeValues() +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority = generated.RehydratePriority + +const ( + RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh + RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return generated.PossibleRehydratePriorityValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go new file mode 100644 index 00000000000..61d936ab73d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -0,0 +1,427 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "reflect" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = generated.ContainerCPKScopeInfo + +// BlobFlatListSegment - List of BlobItem. +type BlobFlatListSegment = generated.BlobFlatListSegment + +// BlobHierarchyListSegment - List of BlobItem and BlobPrefix. +type BlobHierarchyListSegment = generated.BlobHierarchyListSegment + +// BlobProperties - Properties of a blob. +type BlobProperties = generated.BlobProperties + +// BlobItem - An Azure Storage blob. +type BlobItem = generated.BlobItem + +// BlobTags - Blob tags. +type BlobTags = generated.BlobTags + +// BlobPrefix is a blob's prefix when hierarchically listing blobs. 
+type BlobPrefix = generated.BlobPrefix + +// BlobTag - a key/value pair on a blob. +type BlobTag = generated.BlobTag + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = exported.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. +type ModifiedAccessConditions = exported.ModifiedAccessConditions + +// AccessPolicy - An Access policy. +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission = exported.AccessPolicyPermission + +// SignedIdentifier - signed identifier. +type SignedIdentifier = generated.SignedIdentifier + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access. + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]*string + + // Optional. Specifies the encryption scope settings to set on the container. + CPKScopeInfo *CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + AccessConditions *AccessConditions +} + +func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions) + return nil, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// RestoreOptions contains the optional parameters for the Client.Restore method. +type RestoreOptions struct { + // placeholder for future options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type GetPropertiesOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsInclude indicates what additional information the service should return with each blob. 
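ListBlobsInclude, defined just below, toggles which extra datasets a listing carries; combined with the hierarchy pager, a delimiter folds flat blob names into BlobPrefix "directories". A sketch under an assumed *container.Client:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// listTopLevel lists the container's first "directory" level.
	func listTopLevel(client *container.Client) {
		// The "/" delimiter folds blobs that share a prefix into BlobPrefix
		// entries instead of returning every blob individually.
		pager := client.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
			Include: container.ListBlobsInclude{Metadata: true},
		})
		for pager.More() {
			page, err := pager.NextPage(context.TODO())
			if err != nil {
				log.Fatal(err)
			}
			for _, prefix := range page.Segment.BlobPrefixes {
				fmt.Println("dir: ", *prefix.Name)
			}
			for _, item := range page.Segment.BlobItems {
				fmt.Println("blob:", *item.Name)
			}
		}
	}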
+type ListBlobsInclude struct { + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool +} + +func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { + if reflect.ValueOf(l).IsZero() { + return nil + } + + include := []generated.ListBlobsIncludeItem{} + + if l.Copy { + include = append(include, generated.ListBlobsIncludeItemCopy) + } + if l.Deleted { + include = append(include, generated.ListBlobsIncludeItemDeleted) + } + if l.DeletedWithVersions { + include = append(include, generated.ListBlobsIncludeItemDeletedwithversions) + } + if l.ImmutabilityPolicy { + include = append(include, generated.ListBlobsIncludeItemImmutabilitypolicy) + } + if l.LegalHold { + include = append(include, generated.ListBlobsIncludeItemLegalhold) + } + if l.Metadata { + include = append(include, generated.ListBlobsIncludeItemMetadata) + } + if l.Snapshots { + include = append(include, generated.ListBlobsIncludeItemSnapshots) + } + if l.Tags { + include = append(include, generated.ListBlobsIncludeItemTags) + } + if l.UncommittedBlobs { + include = append(include, generated.ListBlobsIncludeItemUncommittedblobs) + } + if l.Versions { + include = append(include, generated.ListBlobsIncludeItemVersions) + } + + return include +} + +// ListBlobsFlatOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment method. +type ListBlobsFlatOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by MaxResults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListBlobsHierarchyOptions provides set of configurations for Client.NewListBlobsHierarchyPager +type ListBlobsHierarchyOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListBlobsInclude + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. 
The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify MaxResults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by MaxResults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.ListBlobHierarchySegment method. +func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions { + if o == nil { + return generated.ContainerClientListBlobHierarchySegmentOptions{} + } + + return generated.ContainerClientListBlobHierarchySegmentOptions{ + Include: o.Include.format(), + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + Metadata map[string]*string + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. +type GetAccessPolicyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation. +type SetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access. + // If this header is not included in the request, container data is private to the account owner. 
+ Access *PublicAccessType + AccessConditions *AccessConditions + ContainerACL []*SignedIdentifier +} + +func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions, []*SignedIdentifier, error) { + if o == nil { + return nil, nil, nil, nil, nil + } + if o.ContainerACL != nil { + for _, c := range o.ContainerACL { + err := formatTime(c) + if err != nil { + return nil, nil, nil, nil, err + } + } + } + lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions) + return &generated.ContainerClientSetAccessPolicyOptions{ + Access: o.Access, + }, lac, mac, o.ContainerACL, nil +} + +func formatTime(c *SignedIdentifier) error { + if c.AccessPolicy == nil { + return nil + } + + if c.AccessPolicy.Start != nil { + st, err := time.Parse(time.RFC3339, c.AccessPolicy.Start.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Start = &st + } + if c.AccessPolicy.Expiry != nil { + et, err := time.Parse(time.RFC3339, c.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Expiry = &et + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. +type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. 
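The SetAccessPolicyOptions/formatTime pair above round-trips policy timestamps through RFC 3339 so the service accepts them. A sketch of publishing a one-day read-only stored access policy, assuming a shared-key-authenticated *container.Client (the policy ID is hypothetical):

	package main

	import (
		"context"
		"log"
		"time"

		"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// setReadPolicy publishes a one-day, read-only stored access policy.
	func setReadPolicy(client *container.Client) {
		start := time.Now().UTC()
		expiry := start.Add(24 * time.Hour)
		_, err := client.SetAccessPolicy(context.TODO(), &container.SetAccessPolicyOptions{
			ContainerACL: []*container.SignedIdentifier{{
				ID: to.Ptr("read-only-1d"),
				AccessPolicy: &container.AccessPolicy{
					Start:      &start,
					Expiry:     &expiry,
					Permission: to.Ptr("r"),
				},
			}},
		})
		if err != nil {
			log.Fatal(err)
		}
	}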
+type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FilterBlobs. +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ContainerClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go new file mode 100644 index 00000000000..9aaefe277fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -0,0 +1,69 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package container + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.ContainerClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.ContainerClientDeleteResponse + +// RestoreResponse contains the response from method Client.Restore. +type RestoreResponse = generated.ContainerClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse + +// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse + +// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment. 
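FilterBlobsOptions above feeds Client.FilterBlobs, which matches blobs in the container by tag expression. A sketch, with an assumed *container.Client and illustrative tag names and values:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	)

	// findTagged lists blobs whose tags match the expression.
	func findTagged(client *container.Client) {
		where := "environment='production' and owner='keda'"
		resp, err := client.FilterBlobs(context.TODO(), where, nil)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range resp.Blobs {
			fmt.Println(*b.Name)
		}
	}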
+type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse
+
+// ListBlobsHierarchySegmentResponse - An enumeration of blobs
+type ListBlobsHierarchySegmentResponse = generated.ListBlobsHierarchySegmentResponse
+
+// SetMetadataResponse contains the response from method Client.SetMetadata.
+type SetMetadataResponse = generated.ContainerClientSetMetadataResponse
+
+// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy.
+type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse
+
+// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy.
+type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse
+
+// GetAccountInfoResponse contains the response from method Client.GetAccountInfo.
+type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse
+
+// SubmitBatchResponse contains the response from method Client.SubmitBatch.
+type SubmitBatchResponse struct {
+	// Responses contains the responses of the sub-requests in the batch
+	Responses []*BatchResponseItem
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// BatchResponseItem contains the response for the individual sub-requests.
+type BatchResponseItem = exported.BatchResponseItem
+
+// FilterBlobsResponse contains the response from method Client.FilterBlobs.
+type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
new file mode 100644
index 00000000000..9a4806c5757
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
@@ -0,0 +1,210 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+/*
+
+Package azblob provides access to Azure Blob Storage.
+
+The azblob package is capable of:
+ - Creating, deleting, and querying containers in an account
+ - Creating, deleting, and querying blobs in a container
+ - Creating Shared Access Signatures for authentication
+
+Types of Resources
+
+The azblob package allows you to interact with three types of resources:
+
+* Azure storage accounts.
+* Containers within those storage accounts.
+* Blobs (block blobs/page blobs/append blobs) within those containers.
+
+The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object.
+To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account.
+
+Types of Credentials
+
+The clients support different forms of authentication.
+The azblob library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String,
+or authorization with a Shared Access Signature token.
+
+Using a Shared Key
+
+To use an account shared key (aka account key or access key), provide the key as a string.
+This can be found in your storage account in the Azure Portal under the "Access Keys" section.
+ +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil) + handle(err) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() functions. + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.URL()) + + // Alternatively, you can create SAS on the fly + + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(0, 0, 1) + serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) + handle(err) + + serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.URL()) + +Types of Clients + +There are three different clients provided to interact with the various components of the Blob Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete containers within the account. + +2. **`ContainerClient`** + * Get and set container access settings, properties, and metadata. + * Create, delete, and query blobs within the container. + * `ContainerLeaseClient` to support container lease management. + +3. **`BlobClient`** + * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` + * Get and set blob properties. + * Perform CRUD operations on a given blob. + * `BlobLeaseClient` to support blob lease management. + +Examples + + // Your account name and key can be obtained from the Azure Portal. 
+ accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ + serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a container ===== + + // First, create a container client, and use the Create method to create a new container in your account + containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer") + handle(err) + + // All APIs have an options' bag struct as a parameter. + // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc. + // If you want to use the default options, pass in nil. + _, err = containerClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Upload and Download a block blob ===== + uploadData := "Hello world!" + + // Create a new blockBlobClient from the containerClient + blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt") + handle(err) + + // Upload data to the block blob + blockBlobUploadOptions := blockblob.UploadOptions{ + Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, + Tags: map[string]string{"Year": "2022"}, + } + _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) + handle(err) + + // Download the blob's contents and ensure that the download worked properly + blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil) + handle(err) + + // Use the bytes.Buffer object to read the downloaded data. + // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. + reader := blobDownloadResponse.Body(nil) + downloadData, err := io.ReadAll(reader) + handle(err) + if string(downloadData) != uploadData { + handle(errors.New("uploaded data should be same as downloaded data")) + } + + if err = reader.Close(); err != nil { + handle(err) + return + } + + // ===== 3. List blobs ===== + // List methods returns a pager object which can be used to iterate over the results of a paging operation. + // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. + // PageResponse() can be used to iterate over the results of the specific page. + // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. + pager := containerClient.NewListBlobsFlatPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + handle(err) + for _, v := range resp.Segment.BlobItems { + fmt.Println(*v.Name) + } + } + + // Delete the blob. + _, err = blockBlobClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the container. 
+ _, err = containerClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go new file mode 100644 index 00000000000..073de855b61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. + // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://.blob.core.windows.net. + Audience string +} + +type Client[T any] struct { + inner *T + credential any + options *ClientOptions +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + switch cred := client.credential.(type) { + case *exported.SharedKeyCredential: + return cred + default: + return nil + } +} + +func Credential[T any](client *Client[T]) any { + return client.credential +} + +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(containerURL, azClient), + credential: credential, + options: options, + } +} + +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { + return &Client[generated.ContainerClient]{ + inner: generated.NewContainerClient(containerURL, azClient), + credential: credential, + options: options, + } +} + +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { + return &Client[generated.BlobClient]{ + inner: generated.NewBlobClient(blobURL, azClient), + credential: credential, + options: options, + } +} + +type CompositeClient[T, U any] struct { + innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU +} + +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) 
*CompositeClient[generated.BlobClient, generated.AppendBlobClient] { + return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { + return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { + return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), + sharedKey: sharedKey, + } +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go new file mode 100644 index 00000000000..96d188fa567 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + +const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) { + if b == nil { + return nil, nil + } + return b.LeaseAccessConditions, b.ModifiedAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// ModifiedAccessConditions contains a group of parameters for specifying access conditions. 
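+//
+// Illustrative sketch, not part of the vendored file: callers thread these
+// conditions through per-operation options so a request only succeeds while
+// the blob state still matches. The blob package option shape below is an
+// assumption for the example:
+//
+//	opts := &blob.DownloadStreamOptions{
+//		AccessConditions: &blob.AccessConditions{
+//			ModifiedAccessConditions: &blob.ModifiedAccessConditions{
+//				IfMatch: to.Ptr(azcore.ETag(`"0x8DEXAMPLE"`)), // made-up ETag
+//			},
+//		},
+//	}
+//	// The download then fails with HTTP 412 if the ETag no longer matches.
+//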
+type ModifiedAccessConditions = generated.ModifiedAccessConditions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go new file mode 100644 index 00000000000..14c293cf656 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/access_policy.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. +func (p *AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go new file mode 100644 index 00000000000..c26c62aa883 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -0,0 +1,280 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +const ( + batchIdPrefix = "batch_" + httpVersion = "HTTP/1.1" + httpNewline = "\r\n" +) + +// createBatchID is used for creating a new batch id which is used as batch boundary in the request body +func createBatchID() (string, error) { + batchID, err := uuid.New() + if err != nil { + return "", err + } + + return batchIdPrefix + batchID.String(), nil +} + +// buildSubRequest is used for building the sub-request. 
Example: +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func buildSubRequest(req *policy.Request) []byte { + var batchSubRequest strings.Builder + blobPath := req.Raw().URL.EscapedPath() + if len(req.Raw().URL.RawQuery) > 0 { + blobPath += "?" + req.Raw().URL.RawQuery + } + + batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) + + for k, v := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + continue + } + if len(v) > 0 { + batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) + } + } + + batchSubRequest.WriteString(httpNewline) + return []byte(batchSubRequest.String()) +} + +// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. +// +// Example of a sub-request in the batch request body: +// +// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 +// Content-Type: application/http +// Content-Transfer-Encoding: binary +// Content-ID: 0 +// +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account: +// Content-Length: 0 +func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { + batchID, err := createBatchID() + if err != nil { + return nil, "", err + } + + // Create a new multipart buffer + reqBody := &bytes.Buffer{} + writer := multipart.NewWriter(reqBody) + + // Set the boundary + err = writer.SetBoundary(batchID) + if err != nil { + return nil, "", err + } + + partHeaders := make(textproto.MIMEHeader) + partHeaders["Content-Type"] = []string{"application/http"} + partHeaders["Content-Transfer-Encoding"] = []string{"binary"} + var partWriter io.Writer + + for i, req := range bb.SubRequests { + if bb.AuthPolicy != nil { + _, err := bb.AuthPolicy.Do(req) + if err != nil && !strings.EqualFold(err.Error(), "no more policies") { + if log.Should(EventSubmitBatch) { + log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) + } + return nil, "", err + } + } + + partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} + partWriter, err = writer.CreatePart(partHeaders) + if err != nil { + return nil, "", err + } + + _, err = partWriter.Write(buildSubRequest(req)) + if err != nil { + return nil, "", err + } + } + + // Close the multipart writer + err = writer.Close() + if err != nil { + return nil, "", err + } + + return reqBody.Bytes(), batchID, nil +} + +// UpdateSubRequestHeaders updates the sub-request headers. +// Removes x-ms-version header. +func UpdateSubRequestHeaders(req *policy.Request) { + // remove x-ms-version header from the request header + for k := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + delete(req.Raw().Header, k) + } + } +} + +// BatchResponseItem contains the response for the individual sub-requests. 
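+//
+// Illustrative sketch, not part of the vendored file: the parent SubmitBatch
+// request can succeed even when individual sub-requests fail, so callers are
+// expected to inspect Error on every item (field names as declared just
+// below; resp.Responses follows the container.SubmitBatchResponse shape):
+//
+//	for _, item := range resp.Responses {
+//		if item.Error != nil {
+//			fmt.Printf("sub-request for %s/%s failed: %v\n",
+//				*item.ContainerName, *item.BlobName, item.Error)
+//		}
+//	}
+//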
+type BatchResponseItem struct { + ContentID *int + ContainerName *string + BlobName *string + RequestID *string + Version *string + Error error // nil error indicates that the batch sub-request operation is successful +} + +func getResponseBoundary(contentType *string) (string, error) { + if contentType == nil { + return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") + } + + _, params, err := mime.ParseMediaType(*contentType) + if err != nil { + return "", err + } + + if val, ok := params["boundary"]; ok { + return val, nil + } else { + return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) + } +} + +func getContentID(part *multipart.Part) (*int, error) { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + return nil, nil + } + + val, err := strconv.Atoi(strings.TrimSpace(contentID)) + if err != nil { + return nil, err + } + return &val, nil +} + +func getResponseHeader(key string, resp *http.Response) *string { + val := resp.Header.Get(key) + if val == "" { + return nil + } + return &val +} + +// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. +func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { + boundary, err := getResponseBoundary(contentType) + if err != nil { + return nil, err + } + + respReader := multipart.NewReader(respBody, boundary) + var responses []*BatchResponseItem + + for { + part, err := respReader.NextPart() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + batchSubResponse := &BatchResponseItem{} + batchSubResponse.ContentID, err = getContentID(part) + if err != nil { + return nil, err + } + + if batchSubResponse.ContentID != nil { + path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") + p := strings.Split(path, "/") + batchSubResponse.ContainerName = to.Ptr(p[0]) + batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) + } + + respBytes, err := io.ReadAll(part) + if err != nil { + return nil, err + } + respBytes = append(respBytes, byte('\n')) + buf := bytes.NewBuffer(respBytes) + resp, err := http.ReadResponse(bufio.NewReader(buf), nil) + // sub-response parsing error + if err != nil { + return nil, err + } + + batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) + batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) + + // sub-response failure + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if len(responses) == 0 && batchSubResponse.ContentID == nil { + // this case can happen when the parent request fails. + // For example, batch request having more than 256 sub-requests. 
+ return nil, fmt.Errorf("%v", string(respBytes)) + } + + resp.Request = subRequests[*batchSubResponse.ContentID].Raw() + batchSubResponse.Error = runtime.NewResponseError(resp) + } + + responses = append(responses, batchSubResponse) + } + + if len(responses) != len(subRequests) { + return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) + } + + return responses, nil +} + +// not exported but used for batch request creation + +// BlobBatchBuilder is used for creating the blob batch request +type BlobBatchBuilder struct { + AuthPolicy policy.Policy + SubRequests []*policy.Request +} + +// BlobBatchOperationType defines the operation of the blob batch sub-requests. +type BlobBatchOperationType string + +const ( + BatchDeleteOperationType BlobBatchOperationType = "delete" + BatchSetTierOperationType BlobBatchOperationType = "set tier" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go new file mode 100644 index 00000000000..d0355727c90 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset and zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. +func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go new file mode 100644 index 00000000000..d775fb5c88a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azblob/log.go +const ( + // EventUpload is used when we compute number of blocks to upload and size of each block. + EventUpload log.Event = "azblob.Upload" + + // EventSubmitBatch is used for logging events related to submit blob batch operation. 
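+	//
+	// Illustrative sketch, not part of the vendored file: both events flow
+	// through the azcore log facility, so a consumer subscribes roughly like
+	// this (azlog is github.com/Azure/azure-sdk-for-go/sdk/azcore/log):
+	//
+	//	azlog.SetEvents(azlog.Event("azblob.Upload"), azlog.Event("azblob.SubmitBatch"))
+	//	azlog.SetListener(func(ev azlog.Event, msg string) {
+	//		fmt.Printf("[%s] %s\n", ev, msg)
+	//	})
+	//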
+ EventSubmitBatch log.Event = "azblob.SubmitBatch" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go new file mode 100644 index 00000000000..71473decab4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/set_expiry.go @@ -0,0 +1,71 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "net/http" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// ExpiryType defines values for ExpiryType +type ExpiryType interface { + Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) + notPubliclyImplementable() +} + +// ExpiryTypeAbsolute defines the absolute time for the blob expiry +type ExpiryTypeAbsolute time.Time + +// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry +type ExpiryTypeRelativeToNow time.Duration + +// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry +type ExpiryTypeRelativeToCreation time.Duration + +// ExpiryTypeNever defines that the blob will be set to never expire +type ExpiryTypeNever struct { + // empty struct since NeverExpire expiry type does not require expiry time +} + +// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method. +type SetExpiryOptions struct { + // placeholder for future options +} + +func (e ExpiryTypeAbsolute) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsAbsolute, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(time.Time(e).UTC().Format(http.TimeFormat)), + } +} + +func (e ExpiryTypeAbsolute) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToNow) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToNow, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToNow) notPubliclyImplementable() {} + +func (e ExpiryTypeRelativeToCreation) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsRelativeToCreation, &generated.BlobClientSetExpiryOptions{ + ExpiresOn: to.Ptr(strconv.FormatInt(time.Duration(e).Milliseconds(), 10)), + } +} + +func (e ExpiryTypeRelativeToCreation) notPubliclyImplementable() {} + +func (e ExpiryTypeNever) Format(o *SetExpiryOptions) (generated.ExpiryOptions, *generated.BlobClientSetExpiryOptions) { + return generated.ExpiryOptionsNeverExpire, nil +} + +func (e ExpiryTypeNever) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go new file mode 100644 index 00000000000..adf46b06816 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// AccountName returns the Storage account's name. +func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
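+//
+// (The exported ComputeHMACSHA256 helper later in this file forwards to this
+// unexported method.) Illustrative sketch, not part of the vendored file, of
+// the underlying scheme using only the standard library:
+//
+//	key, err := base64.StdEncoding.DecodeString(accountKey) // decoded once, reused
+//	if err != nil { /* handle */ }
+//	mac := hmac.New(sha256.New, key)
+//	mac.Write([]byte(stringToSign)) // the canonicalized request, built below
+//	signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
+//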
+func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. +func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if s.cred == nil { + return req.Next() + } + + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go new file mode 100644 index 00000000000..f3e571fa6a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/transfer_validation_option.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "bytes" + "encoding/binary" + "hash/crc64" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType interface { + Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + notPubliclyImplementable() +} + +// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64. +type TransferValidationTypeCRC64 uint64 + +func (c TransferValidationTypeCRC64) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, uint64(c)) + cfg.SetCRC64(buf) + return rsc, nil +} + +func (TransferValidationTypeCRC64) notPubliclyImplementable() {} + +// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer. +func TransferValidationTypeComputeCRC64() TransferValidationType { + return transferValidationTypeFn(func(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + buf, err := io.ReadAll(rsc) + if err != nil { + return nil, err + } + + crc := crc64.Checksum(buf, shared.CRC64Table) + return TransferValidationTypeCRC64(crc).Apply(streaming.NopCloser(bytes.NewReader(buf)), cfg) + }) +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 []byte + +func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + cfg.SetMD5(c) + return rsc, nil +} + +func (TransferValidationTypeMD5) notPubliclyImplementable() {} + +type transferValidationTypeFn func(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + +func (t transferValidationTypeFn) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + return t(rsc, cfg) +} + +func (transferValidationTypeFn) notPubliclyImplementable() {} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go new file mode 100644 index 00000000000..2e2dd16e426 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/user_delegation_credential.go @@ -0,0 +1,64 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it +func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential { + return &UserDelegationCredential{ + accountName: accountName, + userDelegationKey: udk, + } +} + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// UserDelegationCredential contains an account's name and its user delegation key. 
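+//
+// Illustrative sketch, not part of the vendored file: a user delegation key is
+// minted with an AAD token instead of an account key. The service.Client call
+// and sas.TimeFormat constant below are assumptions for the example:
+//
+//	info := service.KeyInfo{
+//		Start:  to.Ptr(time.Now().UTC().Format(sas.TimeFormat)),
+//		Expiry: to.Ptr(time.Now().Add(48 * time.Hour).UTC().Format(sas.TimeFormat)),
+//	}
+//	udc, err := svcClient.GetUserDelegationCredential(ctx, info, nil)
+//	// udc can then sign a user delegation SAS via the sas package helpers.
+//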
+type UserDelegationCredential struct { + accountName string + userDelegationKey UserDelegationKey +} + +// getAccountName returns the Storage account's Name +func (f *UserDelegationCredential) getAccountName() string { + return f.accountName +} + +// GetAccountName is a helper method for accessing the user delegation key parameters outside this package. +func GetAccountName(udc *UserDelegationCredential) string { + return udc.getAccountName() +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) { + bytes, _ := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value) + h := hmac.New(sha256.New, bytes) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside this package. +func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) { + return udc.computeHMACSHA256(message) +} + +// getUDKParams returns UserDelegationKey +func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey { + return &f.userDelegationKey +} + +// GetUDKParams is a helper method for accessing the user delegation key parameters outside this package. +func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey { + return udc.getUDKParams() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go new file mode 100644 index 00000000000..720d6e8fdbd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.3.2" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go new file mode 100644 index 00000000000..288df7edda4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *AppendBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
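+//
+// Illustrative sketch, not part of the vendored file: application code does
+// not call this constructor directly; it is reached through the internal/base
+// wrappers shown earlier, roughly:
+//
+//	azClient, err := azcore.NewClient("azblob", exported.ModuleVersion, runtime.PipelineOptions{}, nil)
+//	if err != nil { /* handle */ }
+//	composite := base.NewAppendBlobClient(blobURL, azClient, sharedKey)
+//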
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { + client := &AppendBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md new file mode 100644 index 00000000000..92dc7e2d31e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -0,0 +1,475 @@ +# Code Generation - Azure Blob SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.61" +``` + +### Updating service version to 2023-11-03 +```yaml +directive: +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). + replaceAll(`2021-12-02`, `2023-11-03`); +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/Name\s+\*BlobName/g, `Name *string`); +``` + +### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; +``` + +### Remove pager methods and export various generated methods in container client + +``` yaml +directive: + - from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`). + replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). + replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); +``` + +### Fix BlobMetadata. 
+ +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` + +# Export various createRequest/HandleResponse methods + +``` yaml +directive: +- from: zz_container_client.go + where: $ + transform: >- + return $. + replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }). + replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` }); + +- from: zz_pageblob_client.go + where: $ + transform: >- + return $. 
+ replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }). + replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` }); +``` + +### Clean up some const type names so they don't stutter + +``` yaml +directive: +- from: swagger-document + where: $.parameters['BlobDeleteType'] + transform: > + $["x-ms-enum"].name = "DeleteType"; + $["x-ms-client-name"] = "DeleteType"; + +- from: swagger-document + where: $.parameters['BlobExpiryOptions'] + transform: > + $["x-ms-enum"].name = "ExpiryOptions"; + $["x-ms-client-name"].name = "ExpiryOptions"; + +- from: swagger-document + where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"] + transform: > + $["x-ms-client-name"].name = "ImmutabilityPolicyMode"; + $.enum = [ "Mutable", "Unlocked", "Locked"]; + $["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false }; + +- from: swagger-document + where: $.parameters['ImmutabilityPolicyMode'] + transform: > + $["x-ms-enum"].name = "ImmutabilityPolicySetting"; + $["x-ms-client-name"].name = "ImmutabilityPolicySetting"; + +- from: swagger-document + where: $.definitions['BlobPropertiesInternal'] + transform: > + $.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: + - zz_models.go + - zz_options.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`). + replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`). + replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`). + replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). + replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`). + replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`). + replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`). + replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`); +``` + +### Unsure why this casing changed, but fixing it + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/SignedOid\s+\*string/g, `SignedOID *string`). + replace(/SignedTid\s+\*string/g, `SignedTID *string`); +``` + +### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed + +``` yaml +directive: +- from: zz_constants.go + where: $ + transform: >- + return $. 
+ replace(/IncrementalCopyOfEralierVersionSnapshotNotAllowed/g, "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"); +``` + +### Fix up x-ms-content-crc64 header response name + +``` yaml +directive: +- from: swagger-document + where: $.x-ms-paths.*.*.responses.*.headers.x-ms-content-crc64 + transform: > + $["x-ms-client-name"] = "ContentCRC64" +``` + +``` yaml +directive: +- rename-model: + from: BlobItemInternal + to: BlobItem +- rename-model: + from: BlobPropertiesInternal + to: BlobProperties +``` + +### Updating encoding URL, Golang adds '+' which disrupts encoding with service + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`) +``` + +### Change `where` parameter in blob filtering to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FilterBlobsWhere + transform: > + $.required = true; +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Change CPK acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cpk/g, "CPK"); +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); +``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+ replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`); + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go new file mode 100644 index 00000000000..343073b2e66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "time" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +func (client *BlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal +} + +func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) +} + +func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) +} + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
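+//
+// A minimal construction sketch, assuming the *azcore.Client comes from
+// azcore.NewClient; the module name, version, and endpoint below are
+// placeholder values:
+//
+//	azClient, err := azcore.NewClient("azblob", "v1.0.0", runtime.PipelineOptions{}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	blobClient := NewBlobClient("https://myaccount.blob.core.windows.net/cont/blob", azClient)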
+func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go new file mode 100644 index 00000000000..873d9a419fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *BlockBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *BlockBlobClient) Internal() *azcore.Client { + return client.internal +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { + client := &BlockBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go new file mode 100644 index 00000000000..57f112001bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go new file mode 100644 index 00000000000..8f2bbbb7cb8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +const ServiceVersion = "2023-11-03" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go new file mode 100644 index 00000000000..d43b2c78259 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ContainerClient) Endpoint() string { + return client.endpoint +} + +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go new file mode 100644 index 00000000000..aaef9f53ba6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -0,0 +1,141 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + +type TransactionalContentSetter interface { + SetCRC64([]byte) + SetMD5([]byte) +} + +func (a *AppendBlobClientAppendBlockOptions) SetCRC64(v []byte) { + a.TransactionalContentCRC64 = v +} + +func (a *AppendBlobClientAppendBlockOptions) SetMD5(v []byte) { + a.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientStageBlockOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetCRC64(v []byte) { + p.TransactionalContentCRC64 = v +} + +func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { + p.TransactionalContentMD5 = v +} + +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + SetSourceContentMD5(v []byte) +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + a.SourceContentcrc64 = v +} + +func (a *AppendBlobClientAppendBlockFromURLOptions) SetSourceContentMD5(v []byte) { + a.SourceContentMD5 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentCRC64(v []byte) { + b.SourceContentcrc64 = v +} + +func (b *BlockBlobClientStageBlockFromURLOptions) SetSourceContentMD5(v []byte) { + b.SourceContentMD5 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte) { + p.SourceContentcrc64 = v +} + +func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { + p.SourceContentMD5 = v +} + +// Custom UnmarshalXML functions for types that need special handling. + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. 
+func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. +func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go new file mode 100644 index 00000000000..a7c76208aa2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *PageBlobClient) Endpoint() string { + return client.endpoint +} + +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go new file mode 100644 index 00000000000..32c15a2b097 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go new file mode 100644 index 00000000000..797318611c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -0,0 +1,662 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// AppendBlobClient contains the methods for the AppendBlob group. +// Don't use this type directly, use a constructor function instead. +type AppendBlobClient struct { + internal *azcore.Client + endpoint string +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - body - Initial data +// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
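+//
+// A minimal calling sketch, assuming the payload is wrapped into an
+// io.ReadSeekCloser with streaming.NopCloser from
+// github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming:
+//
+//	data := []byte("hello")
+//	body := streaming.NopCloser(bytes.NewReader(data))
+//	resp, err := client.AppendBlock(ctx, int64(len(data)), body, nil, nil, nil, nil, nil, nil)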
+func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err + } + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err +} + +// appendBlockCreateRequest creates the AppendBlock request. +func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + 
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { + result := AppendBlobClientAppendBlockResponse{} + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified 
+ } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - sourceURL - Specify a URL to the copy source. +// - contentLength - The length of the request. +// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
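+// Destination conditions (If-Match, If-Modified-Since, ...) and source
+// conditions (x-ms-source-if-*) are set independently; ETag values are
+// converted back to plain strings, and timestamps are rendered in the gmt
+// zone since time.RFC1123 writes the zone name into the header.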
+func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
+func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { + result := AppendBlobClientAppendBlockFromURLResponse{} + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
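+//
+// A minimal calling sketch; contentLength is zero because an append blob is
+// created empty and filled afterwards via AppendBlock:
+//
+//	resp, err := client.Create(ctx, 0, nil, nil, nil, nil, nil, nil)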
+func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. +func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + 
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { + result := AppendBlobClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock +// method. +func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error + req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err + } + resp, err := client.sealHandleResponse(httpResp) + return resp, err +} + +// sealCreateRequest creates the Seal request. 
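+// The Seal request is a bodiless PUT with comp=seal; the optional
+// x-ms-blob-condition-appendpos header makes sealing conditional on the
+// blob's current append offset.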
+func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { + result := AppendBlobClientSealResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go new file mode 100644 index 00000000000..fe568a96c7a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -0,0 +1,2962 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// BlobClient contains the methods for the Blob group. +// Don't use this type directly, use a constructor function instead. +type BlobClient struct { + internal *azcore.Client + endpoint string +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
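+//
+// A minimal calling sketch, assuming copyID was captured from the
+// x-ms-copy-id header of an earlier copy operation:
+//
+//	resp, err := client.AbortCopyFromURL(ctx, copyID, nil, nil)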
+func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-copy-action"] = []string{"abort"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. +func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobClientAbortCopyFromURLResponse, error) { + result := BlobClientAbortCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
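+//
+// A minimal calling sketch, requesting a 15-second lease (-1 would request a
+// lease that never expires):
+//
+//	resp, err := client.AcquireLease(ctx, 15, nil, nil)
+//	leaseID := resp.LeaseID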
+func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err + } + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { + result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err + } + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err +} + +// breakLeaseCreateRequest creates the BreakLease request. 
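+// BreakPeriod, when set, is sent as x-ms-lease-break-period and proposes how
+// long the lease should keep running before it breaks; if omitted, the
+// service lets the remaining lease time elapse before the break completes.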
+func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { + result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err + } + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
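+
+// Editorial note: an illustrative ChangeLease sketch (assumes client, ctx, and
+// a currently held lease ID; the GUID is a placeholder). ChangeLease swaps the
+// active lease ID for a proposed one in a single call.
+//
+//	resp, err := client.ChangeLease(ctx, currentLeaseID, "22222222-2222-2222-2222-222222222222", nil, nil)
+//	if err == nil {
+//		currentLeaseID = *resp.LeaseID // continue writing under the new ID
+//	}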
+func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { + result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err + } + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err +} + +// copyFromURLCreateRequest creates the CopyFromURL request. 
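+
+// Editorial note: an illustrative CopyFromURL sketch (assumes client, ctx, and
+// a readable source URL; the URL is a placeholder). Unlike StartCopyFromURL,
+// this operation completes synchronously, which is why the request builder
+// below sets the x-ms-requires-sync header.
+//
+//	srcURL := "https://account.blob.core.windows.net/src/blob?<sas>"
+//	resp, err := client.CopyFromURL(ctx, srcURL, nil, nil, nil, nil, nil)
+//	if err == nil && resp.CopyStatus != nil {
+//		// expected to be "success" for a completed synchronous copy
+//	}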
+func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["x-ms-requires-sync"] = []string{"true"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
+ req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
+ req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ if options != nil && options.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ }
+ if options != nil && options.LegalHold != nil {
+ req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
+ }
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
+ }
+ if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+ req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+ }
+ if options != nil && options.CopySourceTags != nil {
+ req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// copyFromURLHandleResponse handles the CopyFromURL response.
+func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) {
+ result := BlobClientCopyFromURLResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+ contentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlobClientCopyFromURLResponse{}, err
+ }
+ result.ContentCRC64 = contentCRC64
+ }
+ if val := resp.Header.Get("Content-MD5"); val != "" {
+ contentMD5, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlobClientCopyFromURLResponse{}, err
+ }
+ result.ContentMD5 = contentMD5
+ }
+ if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+ result.CopyID = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+ result.CopyStatus = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientCopyFromURLResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = (*azcore.ETag)(&val)
+ }
+ if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+ result.EncryptionScope = &val
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientCopyFromURLResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("x-ms-version-id"); val != "" {
+ result.VersionID = &val
+ }
+ return result, nil
+}
+
+// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
+// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
+// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) {
+ var err error
+ req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
+ if err != nil {
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+ err = runtime.NewResponseError(httpResp)
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ resp, err := client.createSnapshotHandleResponse(httpResp)
+ return resp, err
+}
+
+// createSnapshotCreateRequest creates the CreateSnapshot request.
+func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("comp", "snapshot")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ }
+ if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+ req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// createSnapshotHandleResponse handles the CreateSnapshot response.
+func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) {
+ result := BlobClientCreateSnapshotResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = (*azcore.ETag)(&val)
+ }
+ if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+ isServerEncrypted, err := strconv.ParseBool(val)
+ if err != nil {
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ result.IsServerEncrypted = &isServerEncrypted
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientCreateSnapshotResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-snapshot"); val != "" {
+ result.Snapshot = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("x-ms-version-id"); val != "" {
+ result.VersionID = &val
+ }
+ return result, nil
+}
+
+// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed
+// from the storage account. If the storage account's soft delete feature is enabled,
+// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service
+// retains the blob or snapshot for the number of days specified by the
+// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number
+// of days has passed, the blob's data is permanently removed from the storage
+// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use
+// the List Blobs API and specify the "include=deleted" query parameter to discover
+// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob.
+// All other operations on a soft-deleted blob or snapshot causes the service to
+// return an HTTP status code of 404 (ResourceNotFound).
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err + } + resp, err := client.deleteHandleResponse(httpResp) + return resp, err +} + +// deleteCreateRequest creates the Delete request. +func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
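+
+// Editorial note: an illustrative Delete sketch (assumes client and ctx;
+// DeleteSnapshotsOptionTypeInclude is assumed to be the generated enum value
+// for "include"). A blob that has snapshots cannot be deleted unless its
+// snapshots are deleted with it or deleted first.
+//
+//	opts := &BlobClientDeleteOptions{DeleteSnapshots: to.Ptr(DeleteSnapshotsOptionTypeInclude)}
+//	if _, err := client.Delete(ctx, opts, nil, nil); err != nil {
+//		// a missing blob surfaces as *azcore.ResponseError with ErrorCode BlobNotFound
+//	}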
+func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientDeleteResponse, error) {
+ result := BlobClientDeleteResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDeleteResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ return result, nil
+}
+
+// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
+// method.
+func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
+ var err error
+ req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options)
+ if err != nil {
+ return BlobClientDeleteImmutabilityPolicyResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return BlobClientDeleteImmutabilityPolicyResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+ err = runtime.NewResponseError(httpResp)
+ return BlobClientDeleteImmutabilityPolicyResponse{}, err
+ }
+ resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp)
+ return resp, err
+}
+
+// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request.
+func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("comp", "immutabilityPolicies")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response.
+func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientDeleteImmutabilityPolicyResponse, error) {
+ result := BlobClientDeleteImmutabilityPolicyResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDeleteImmutabilityPolicyResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ return result, nil
+}
+
+// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You
+// can also call Download to read a snapshot.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) {
+ var err error
+ req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) {
+ err = runtime.NewResponseError(httpResp)
+ return BlobClientDownloadResponse{}, err
+ }
+ resp, err := client.downloadHandleResponse(httpResp)
+ return resp, err
+}
+
+// downloadCreateRequest creates the Download request.
+func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
+ }
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
+ }
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ runtime.SkipBodyDownload(req)
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ if options != nil && options.RangeGetContentMD5 != nil {
+ req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)}
+ }
+ if options != nil && options.RangeGetContentCRC64 != nil {
+ req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// downloadHandleResponse handles the Download response.
+func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) {
+ result := BlobClientDownloadResponse{Body: resp.Body}
+ if val := resp.Header.Get("Accept-Ranges"); val != "" {
+ result.AcceptRanges = &val
+ }
+ if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+ blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+ blobCommittedBlockCount := int32(blobCommittedBlockCount32)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.BlobCommittedBlockCount = &blobCommittedBlockCount
+ }
+ if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
+ blobContentMD5, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.BlobContentMD5 = blobContentMD5
+ }
+ if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+ blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.BlobSequenceNumber = &blobSequenceNumber
+ }
+ if val := resp.Header.Get("x-ms-blob-type"); val != "" {
+ result.BlobType = (*BlobType)(&val)
+ }
+ if val := resp.Header.Get("Cache-Control"); val != "" {
+ result.CacheControl = &val
+ }
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+ contentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.ContentCRC64 = contentCRC64
+ }
+ if val := resp.Header.Get("Content-Disposition"); val != "" {
+ result.ContentDisposition = &val
+ }
+ if val := resp.Header.Get("Content-Encoding"); val != "" {
+ result.ContentEncoding = &val
+ }
+ if val := resp.Header.Get("Content-Language"); val != "" {
+ result.ContentLanguage = &val
+ }
+ if val := resp.Header.Get("Content-Length"); val != "" {
+ contentLength, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.ContentLength = &contentLength
+ }
+ if val := resp.Header.Get("Content-MD5"); val != "" {
+ contentMD5, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.ContentMD5 = contentMD5
+ }
+ if val := resp.Header.Get("Content-Range"); val != "" {
+ result.ContentRange = &val
+ }
+ if val := resp.Header.Get("Content-Type"); val != "" {
+ result.ContentType = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
+ copyCompletionTime, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.CopyCompletionTime = &copyCompletionTime
+ }
+ if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+ result.CopyID = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
+ result.CopyProgress = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-source"); val != "" {
+ result.CopySource = &val
+ }
+ if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+ result.CopyStatus = (*CopyStatusType)(&val)
+ }
+ if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+ result.CopyStatusDescription = &val
+ }
+ if val := resp.Header.Get("x-ms-creation-time"); val != "" {
+ creationTime, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.CreationTime = &creationTime
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = (*azcore.ETag)(&val)
+ }
+ if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+ result.EncryptionKeySHA256 = &val
+ }
+ if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+ result.EncryptionScope = &val
+ }
+ if val := resp.Header.Get("x-ms-error-code"); val != "" {
+ result.ErrorCode = &val
+ }
+ if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+ immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
+ }
+ if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+ result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
+ }
+ if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
+ isCurrentVersion, err := strconv.ParseBool(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.IsCurrentVersion = &isCurrentVersion
+ }
+ if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
+ isSealed, err := strconv.ParseBool(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.IsSealed = &isSealed
+ }
+ if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
+ isServerEncrypted, err := strconv.ParseBool(val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.IsServerEncrypted = &isServerEncrypted
+ }
+ if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
+ lastAccessed, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.LastAccessed = &lastAccessed
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
+ result.LeaseDuration = (*LeaseDurationType)(&val)
+ }
+ if val := resp.Header.Get("x-ms-lease-state"); val != "" {
+ result.LeaseState = (*LeaseStateType)(&val)
+ }
+ if val := resp.Header.Get("x-ms-lease-status"); val != "" {
+ result.LeaseStatus = (*LeaseStatusType)(&val)
+ }
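+ // Editorial note (added comment): the two range loops further below fold
+ // response headers prefixed with x-ms-meta- (user metadata) and x-ms-or-
+ // (object replication status) into result.Metadata with the prefix
+ // stripped, matching header names case-insensitively via strings.EqualFold.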
+ if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { + result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
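+
+// Editorial note: an illustrative conditional GetProperties sketch (assumes
+// client, ctx, and an azcore.ETag named etag captured from an earlier
+// response). The If-Match header set above turns this into a conditional
+// request: a stale ETag fails with 412 Precondition Failed.
+//
+//	mac := &ModifiedAccessConditions{IfMatch: &etag}
+//	resp, err := client.GetProperties(ctx, nil, nil, nil, mac)
+//	if err == nil {
+//		_ = resp.ContentLength // blob size in bytes
+//	}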
+func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { + result := BlobClientGetPropertiesResponse{} + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { 
+ return BlobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.LegalHold = &legalHold + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = 
to.Ptr(resp.Header.Get(hh))
+		}
+	}
+	if val := resp.Header.Get("x-ms-or-policy-id"); val != "" {
+		result.ObjectReplicationPolicyID = &val
+	}
+	for hh := range resp.Header {
+		if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") {
+			if result.ObjectReplicationRules == nil {
+				result.ObjectReplicationRules = map[string]*string{}
+			}
+			result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
+		}
+	}
+	if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" {
+		result.RehydratePriority = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-tag-count"); val != "" {
+		tagCount, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.TagCount = &tagCount
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
+	}
+	return result, nil
+}
+
+// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
+// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) {
+	var err error
+	req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions)
+	if err != nil {
+		return BlobClientGetTagsResponse{}, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return BlobClientGetTagsResponse{}, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientGetTagsResponse{}, err
+	}
+	resp, err := client.getTagsHandleResponse(httpResp)
+	return resp, err
+}
+
+// getTagsCreateRequest creates the GetTags request.
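+//
+// As an editorial illustration (not generated code), the request this builder
+// produces is a GET against the blob endpoint with a comp=tags query string;
+// the account, container, blob, and timeout values below are assumptions:
+//
+//	GET https://myaccount.blob.core.windows.net/mycontainer/myblob?comp=tags&timeout=30
+//	x-ms-version: 2023-11-03
+//	Accept: application/xml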
+func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getTagsHandleResponse handles the GetTags response. +func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClientGetTagsResponse, error) { + result := BlobClientGetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientGetTagsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { + return BlobClientGetTagsResponse{}, err + } + return result, nil +} + +// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
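+//
+// Editorial usage sketch (not generated code; "client", "ctx", and an "options"
+// value carrying a populated QueryRequest are assumptions; "io" would need to
+// be imported by the caller):
+//
+//	resp, err := client.Query(ctx, options, nil, nil, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	filtered, err := io.ReadAll(resp.Body) // the query result stream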
+func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error + req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return BlobClientQueryResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientQueryResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err + } + resp, err := client.queryHandleResponse(httpResp) + return resp, err +} + +// queryCreateRequest creates the Query request. +func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "query") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.QueryRequest != nil { + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil + } + return 
req, nil
+}
+
+// queryHandleResponse handles the Query response.
+func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) {
+	result := BlobClientQueryResponse{Body: resp.Body}
+	if val := resp.Header.Get("Accept-Ranges"); val != "" {
+		result.AcceptRanges = &val
+	}
+	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
+	}
+	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
+		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.BlobContentMD5 = blobContentMD5
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
+		result.BlobType = (*BlobType)(&val)
+	}
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
+	}
+	if val := resp.Header.Get("Content-Length"); val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentLength = &contentLength
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Range"); val != "" {
+		result.ContentRange = &val
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
+		copyCompletionTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.CopyCompletionTime = &copyCompletionTime
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
+		result.CopyProgress = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
+		result.CopySource = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = 
(*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err + } + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
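+//
+// Editorial sketch (not generated code): releasing a lease needs only the
+// current lease ID; "client", "ctx", and leaseID are assumptions:
+//
+//	_, err := client.ReleaseLease(ctx, leaseID, nil, nil)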
+func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { + result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
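+//
+// Editorial usage sketch (not generated code; "client", "ctx", and leaseID are
+// assumptions):
+//
+//	resp, err := client.RenewLease(ctx, leaseID, nil, nil)
+//	if err == nil && resp.LeaseID != nil {
+//		leaseID = *resp.LeaseID // a renewed lease keeps the same ID
+//	}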
+func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err + } + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { + result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - expiryOptions - Required. Indicates mode of the expiry time +// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err + } + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-expiry-option"] = []string{string(expiryOptions)} + if options != nil && options.ExpiresOn != nil { + req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
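+//
+// Editorial sketch for the SetExpiry call above (not generated code; "client"
+// and "ctx" are assumptions, and ExpiryOptionsNeverExpire is assumed to be the
+// generated constant for the NeverExpire mode):
+//
+//	_, err := client.SetExpiry(ctx, ExpiryOptionsNeverExpire, nil)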
+func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { + result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error + req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err + } + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
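+//
+// Editorial usage sketch for SetHTTPHeaders (not generated code; "client" and
+// "ctx" are assumptions, the field names are those used by the request builder
+// below):
+//
+//	hdrs := BlobHTTPHeaders{
+//		BlobContentType:  to.Ptr("application/json"),
+//		BlobCacheControl: to.Ptr("no-cache"),
+//	}
+//	_, err := client.SetHTTPHeaders(ctx, nil, &hdrs, nil, nil)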
+func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
+func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { + result := BlobClientSetHTTPHeadersResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error + req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err + } + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err +} + +// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
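+//
+// Editorial sketch for the SetImmutabilityPolicy call above (not generated
+// code; "client", "ctx", and "until" (a time.Time) are assumptions; the option
+// field name appears in the request builder below):
+//
+//	opts := BlobClientSetImmutabilityPolicyOptions{ImmutabilityPolicyExpiry: &until}
+//	_, err := client.SetImmutabilityPolicy(ctx, &opts, nil)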
+func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. +func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (BlobClientSetImmutabilityPolicyResponse, error) { + result := BlobClientSetImmutabilityPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetImmutabilityPolicyResponse{}, err + } + result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - legalHold - Specified if a legal hold should be set on the blob. +// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
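+//
+// Editorial usage sketch (not generated code; "client" and "ctx" are
+// assumptions):
+//
+//	resp, err := client.SetLegalHold(ctx, true, nil)
+//	// resp.LegalHold echoes the applied setting when the service returns it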
+func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error + req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err + } + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err +} + +// setLegalHoldCreateRequest creates the SetLegalHold request. +func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "legalhold") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setLegalHoldHandleResponse handles the SetLegalHold response. +func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobClientSetLegalHoldResponse, error) { + result := BlobClientSetLegalHoldResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetLegalHoldResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value +// pairs +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
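+//
+// Editorial usage sketch (not generated code; "client" and "ctx" are
+// assumptions; Metadata entries are sent as x-ms-meta-* headers by the request
+// builder below):
+//
+//	opts := BlobClientSetMetadataOptions{
+//		Metadata: map[string]*string{"env": to.Ptr("prod")},
+//	}
+//	_, err := client.SetMetadata(ctx, &opts, nil, nil, nil, nil)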
+func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err + } + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { + result := BlobClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// SetTags - The Set Tags operation enables users to set tags on a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - tags - Blob tags +// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error + req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err + } + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err +} + +// setTagsCreateRequest creates the SetTags request. 
+func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil +} + +// setTagsHandleResponse handles the SetTags response. +func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClientSetTagsResponse, error) { + result := BlobClientSetTagsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetTagsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A +// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - tier - Indicates the tier to be set on the blob. +// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
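+//
+// Editorial usage sketch (not generated code; "client" and "ctx" are
+// assumptions, and AccessTierCool is assumed to be the generated constant for
+// the "Cool" tier):
+//
+//	_, err := client.SetTier(ctx, AccessTierCool, nil, nil, nil)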
+func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error + req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlobClientSetTierResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientSetTierResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err + } + resp, err := client.setTierHandleResponse(httpResp) + return resp, err +} + +// setTierCreateRequest creates the SetTier request. +func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tier") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setTierHandleResponse handles the SetTier response. +func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClientSetTierResponse, error) { + result := BlobClientSetTierResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. 
+// - options - BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err + } + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err +} + +// startCopyFromURLCreateRequest creates the StartCopyFromURL request. +func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = 
[]string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
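+//
+// Editorial sketch (not generated code): the copy is asynchronous, so callers
+// typically poll afterwards; "client", "ctx", and srcURL are assumptions:
+//
+//	resp, err := client.StartCopyFromURL(ctx, srcURL, nil, nil, nil, nil)
+//	// resp.CopyID identifies the copy; poll client.GetProperties until the
+//	// returned CopyStatus (x-ms-copy-status) is no longer "pending"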
+func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { + result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// Undelete - Undelete a blob that was previously soft deleted +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error + req, err := client.undeleteCreateRequest(ctx, options) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err + } + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err +} + +// undeleteCreateRequest creates the Undelete request. +func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *BlobClientUndeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// undeleteHandleResponse handles the Undelete response. 
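+// Illustrative usage sketch (not generated code): restoring a soft-deleted
+// blob. Undelete takes only the optional BlobClientUndeleteOptions group;
+// client and ctx are assumed.
+//
+//	if _, err := client.Undelete(ctx, nil); err != nil {
+//		// non-200 responses are returned as *azcore.ResponseError
+//	}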
+func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClientUndeleteResponse, error) { + result := BlobClientUndeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientUndeleteResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go new file mode 100644 index 00000000000..b6115b50a65 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -0,0 +1,993 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// BlockBlobClient contains the methods for the BlockBlob group. +// Don't use this type directly, use a constructor function instead. +type BlockBlobClient struct { + internal *azcore.Client + endpoint string +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - blocks - Blob Blocks. +// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error + req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err +} + +// commitBlockListCreateRequest creates the CommitBlockList request. +func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if 
cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil +} + +// commitBlockListHandleResponse handles the CommitBlockList response. 
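+// Illustrative usage sketch (not generated code): committing staged blocks.
+// It assumes blockIDs ([]*string of base64 IDs) was collected from earlier
+// StageBlock calls and that BlockLookupList exposes a Latest field, as in the
+// generated models; nil is passed for every optional parameter group.
+//
+//	blocks := BlockLookupList{Latest: blockIDs}
+//	_, err := client.CommitBlockList(ctx, blocks, nil, nil, nil, nil, nil, nil)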
+func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { + result := BlockBlobClientCommitBlockListResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error + req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err + } + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
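+// Illustrative usage sketch (not generated code): listing committed and
+// uncommitted blocks together. BlockListTypeAll is one of the BlockListType
+// constants declared in zz_constants.go; client and ctx are assumed.
+//
+//	resp, err := client.GetBlockList(ctx, BlockListTypeAll, nil, nil, nil)
+//	if err == nil {
+//		// resp.BlockList carries the block lists decoded from the XML body
+//	}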
+func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { + result := BlockBlobClientGetBlockListResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL +// method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != 
nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil 
&& options.CopySourceBlobProperties != nil { + req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { + result := BlockBlobClientPutBlobFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - body - Initial data +// - options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err + } + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// stageBlockHandleResponse handles the StageBlock response. 
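+// Illustrative usage sketch (not generated code): staging a single block. It
+// assumes streaming.NopCloser from azcore/streaming to adapt a bytes.Reader
+// into the required io.ReadSeekCloser, and equally sized base64 block IDs.
+//
+//	data := []byte("payload")
+//	blockID := base64.StdEncoding.EncodeToString([]byte("block-000001"))
+//	body := streaming.NopCloser(bytes.NewReader(data))
+//	_, err := client.StageBlock(ctx, blockID, int64(len(data)), body, nil, nil, nil, nil)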
+func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { + result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// - contentLength - The length of the request. +// - sourceURL - Specify a URL to the copy source. +// - options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. 
+func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if 
sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { + result := BlockBlobClientStageBlockFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. 
+// - body - Initial data +// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error + req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err + } + resp, err := client.uploadHandleResponse(httpResp) + return resp, err +} + +// uploadCreateRequest creates the Upload request. +func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + 
req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// uploadHandleResponse handles the Upload 
response. +func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { + result := BlockBlobClientUploadResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go new file mode 100644 index 00000000000..95af9e15447 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -0,0 +1,747 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +type AccessTier string + +const ( + AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" + AccessTierCool AccessTier = "Cool" + AccessTierHot AccessTier = "Hot" + AccessTierP10 AccessTier = "P10" + AccessTierP15 AccessTier = "P15" + AccessTierP20 AccessTier = "P20" + AccessTierP30 AccessTier = "P30" + AccessTierP4 AccessTier = "P4" + AccessTierP40 AccessTier = "P40" + AccessTierP50 AccessTier = "P50" + AccessTierP6 AccessTier = "P6" + AccessTierP60 AccessTier = "P60" + AccessTierP70 AccessTier = "P70" + AccessTierP80 AccessTier = "P80" + AccessTierPremium AccessTier = "Premium" +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. 
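+// Illustrative usage sketch (not generated code): tiers are plain string
+// constants, so callers set them through a pointer in an options group, e.g.
+// (assuming the Tier field on the upload options, as read by the request
+// builders above):
+//
+//	tier := AccessTierCool
+//	opts := &BlockBlobClientUploadOptions{Tier: &tier}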
+func PossibleAccessTierValues() []AccessTier { + return []AccessTier{ + AccessTierArchive, + AccessTierCold, + AccessTierCool, + AccessTierHot, + AccessTierP10, + AccessTierP15, + AccessTierP20, + AccessTierP30, + AccessTierP4, + AccessTierP40, + AccessTierP50, + AccessTierP6, + AccessTierP60, + AccessTierP70, + AccessTierP80, + AccessTierPremium, + } +} + +type AccountKind string + +const ( + AccountKindBlobStorage AccountKind = "BlobStorage" + AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return []AccountKind{ + AccountKindBlobStorage, + AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, + } +} + +type ArchiveStatus string + +const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" + ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, + ArchiveStatusRehydratePendingToCool, + ArchiveStatusRehydratePendingToHot, + } +} + +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus string + +const ( + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return []BlobGeoReplicationStatus{ + BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, + BlobGeoReplicationStatusUnavailable, + } +} + +type BlobType string + +const ( + BlobTypeAppendBlob BlobType = "AppendBlob" + BlobTypeBlockBlob BlobType = "BlockBlob" + BlobTypePageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{ + BlobTypeAppendBlob, + BlobTypeBlockBlob, + BlobTypePageBlob, + } +} + +type BlockListType string + +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. 
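+// Illustrative usage sketch (not generated code): the Possible*Values helpers
+// suit validating untrusted input against the known constants, e.g.:
+//
+//	func isValidBlockListType(s string) bool {
+//		for _, v := range PossibleBlockListTypeValues() {
+//			if string(v) == s {
+//				return true
+//			}
+//		}
+//		return false
+//	}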
+func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{ + BlockListTypeAll, + BlockListTypeCommitted, + BlockListTypeUncommitted, + } +} + +type CopyStatusType string + +const ( + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypeAborted, + CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, + } +} + +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeOnly, + } +} + +type DeleteType string + +const ( + DeleteTypeNone DeleteType = "None" + DeleteTypePermanent DeleteType = "Permanent" +) + +// PossibleDeleteTypeValues returns the possible values for the DeleteType const type. +func PossibleDeleteTypeValues() []DeleteType { + return []DeleteType{ + DeleteTypeNone, + DeleteTypePermanent, + } +} + +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, + } +} + +type ExpiryOptions string + +const ( + ExpiryOptionsAbsolute ExpiryOptions = "Absolute" + ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire" + ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation" + ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow" +) + +// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type. +func PossibleExpiryOptionsValues() []ExpiryOptions { + return []ExpiryOptions{ + ExpiryOptionsAbsolute, + ExpiryOptionsNeverExpire, + ExpiryOptionsRelativeToCreation, + ExpiryOptionsRelativeToNow, + } +} + +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + +type ImmutabilityPolicyMode string + +const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" + ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" + ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" +) + +// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
+func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { + return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, + ImmutabilityPolicyModeMutable, + ImmutabilityPolicyModeUnlocked, + } +} + +type ImmutabilityPolicySetting string + +const ( + ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" +) + +// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. +func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { + return []ImmutabilityPolicySetting{ + ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, + } +} + +type LeaseDurationType string + +const ( + LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, + } +} + +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, + } +} + +type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
+func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + } +} + +type ListContainersIncludeType string + +const ( + ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" + ListContainersIncludeTypeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ + ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, + ListContainersIncludeTypeSystem, + } +} + +type PremiumPageBlobAccessTier string + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return []PremiumPageBlobAccessTier{ + PremiumPageBlobAccessTierP10, + PremiumPageBlobAccessTierP15, + PremiumPageBlobAccessTierP20, + PremiumPageBlobAccessTierP30, + PremiumPageBlobAccessTierP4, + PremiumPageBlobAccessTierP40, + PremiumPageBlobAccessTierP50, + PremiumPageBlobAccessTierP6, + PremiumPageBlobAccessTierP60, + PremiumPageBlobAccessTierP70, + PremiumPageBlobAccessTierP80, + } +} + +type PublicAccessType string + +const ( + PublicAccessTypeBlob PublicAccessType = "blob" + PublicAccessTypeContainer PublicAccessType = "container" +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{ + PublicAccessTypeBlob, + PublicAccessTypeContainer, + } +} + +// QueryFormatType - The quick query format type. +type QueryFormatType string + +const ( + QueryFormatTypeArrow QueryFormatType = "arrow" + QueryFormatTypeDelimited QueryFormatType = "delimited" + QueryFormatTypeJSON QueryFormatType = "json" + QueryFormatTypeParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. 
+func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{ + QueryFormatTypeArrow, + QueryFormatTypeDelimited, + QueryFormatTypeJSON, + QueryFormatTypeParquet, + } +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority string + +const ( + RehydratePriorityHigh RehydratePriority = "High" + RehydratePriorityStandard RehydratePriority = "Standard" +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return []RehydratePriority{ + RehydratePriorityHigh, + RehydratePriorityStandard, + } +} + +type SKUName string + +const ( + SKUNamePremiumLRS SKUName = "Premium_LRS" + SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNameStandardRAGRS SKUName = "Standard_RAGRS" + SKUNameStandardZRS SKUName = "Standard_ZRS" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNamePremiumLRS, + SKUNameStandardGRS, + SKUNameStandardLRS, + SKUNameStandardRAGRS, + SKUNameStandardZRS, + } +} + +type SequenceNumberActionType string + +const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" + SequenceNumberActionTypeMax SequenceNumberActionType = "max" + SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, + SequenceNumberActionTypeMax, + SequenceNumberActionTypeUpdate, + } +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" + StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" + StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" + StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" + StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" + StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" + 
StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" + StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" + StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" + StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" + StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" + StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" + StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" + StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" + StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" + StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" + StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" + StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode 
= "InvalidVersionForPageBlobOperation" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = 
"SequenceNumberConditionNotMet" + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" + StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" + StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. +func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAppendPositionConditionNotMet, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeBlobAlreadyExists, + StorageErrorCodeBlobArchived, + StorageErrorCodeBlobBeingRehydrated, + StorageErrorCodeBlobImmutableDueToPolicy, + StorageErrorCodeBlobNotArchived, + StorageErrorCodeBlobNotFound, + StorageErrorCodeBlobOverwritten, + StorageErrorCodeBlobTierInadequateForContentLength, + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, + StorageErrorCodeBlockCountExceedsLimit, + StorageErrorCodeBlockListTooLong, + StorageErrorCodeCannotChangeToLowerTier, + StorageErrorCodeCannotVerifyCopySource, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerAlreadyExists, + StorageErrorCodeContainerBeingDeleted, + StorageErrorCodeContainerDisabled, + StorageErrorCodeContainerNotFound, + StorageErrorCodeContentLengthLargerThanTierLimit, + StorageErrorCodeCopyAcrossAccountsNotSupported, + StorageErrorCodeCopyIDMismatch, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeIncrementalCopyBlobMismatch, + StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + 
StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go new file mode 100644 index 00000000000..dbc2a293ec6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -0,0 +1,1591 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// ContainerClient contains the methods for the Container group. +// Don't use this type directly, use a constructor function instead. +type ContainerClient struct { + internal *azcore.Client + endpoint string +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite +// lease can be between 15 and 60 seconds. A lease duration cannot be changed using +// renew or change. +// - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error + req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err + } + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err +} + +// acquireLeaseCreateRequest creates the AcquireLease request. 
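+// A minimal usage sketch for AcquireLease, assuming a ContainerClient obtained
+// from one of the package's constructor functions (ctx and client are the
+// caller's):
+//
+//	resp, err := client.AcquireLease(ctx, 15, nil, nil)
+//	if err != nil {
+//		// on service failures err is an *azcore.ResponseError
+//	}
+//	leaseID := *resp.LeaseID // pass this to subsequent lease operations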
+func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. +func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { + result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error + req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err + } + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. 
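+// A minimal sketch of BreakLease, assuming the caller wants the remaining
+// break period; all names other than the method are the caller's:
+//
+//	resp, err := client.BreakLease(ctx, nil, nil)
+//	if err == nil && resp.LeaseTime != nil {
+//		remaining := *resp.LeaseTime // seconds until the lease is fully broken
+//		_ = remaining
+//	}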
+func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { + result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err + } + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err +} + +// changeLeaseCreateRequest creates the ChangeLease request. 
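+// A minimal sketch of ChangeLease, assuming currentID holds the active lease
+// ID and newID is a caller-generated GUID string:
+//
+//	resp, err := client.ChangeLease(ctx, currentID, newID, nil, nil)
+//	if err == nil {
+//		currentID = *resp.LeaseID // the service echoes the new lease ID
+//	}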
+func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. +func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { + result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
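+//
+// A minimal sketch of Create with metadata and public blob access; the
+// metadata key and value are illustrative only:
+//
+//	access := PublicAccessTypeBlob
+//	opts := &ContainerClientCreateOptions{
+//		Access:   &access,
+//		Metadata: map[string]*string{"project": to.Ptr("demo")},
+//	}
+//	_, err := client.Create(ctx, opts, nil)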
+func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) + if err != nil { + return ContainerClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. +func (client *ContainerClient) createCreateRequest(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header["x-ms-default-encryption-scope"] = []string{*containerCPKScopeInfo.DefaultEncryptionScope} + } + if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { + result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-11-03 +// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err + } + resp, err := client.deleteHandleResponse(httpResp) + return resp, err +} + +// deleteCreateRequest creates the Delete request. +func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (ContainerClientDeleteResponse, error) { + result := ContainerClientDeleteResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. 
+// Filter blobs searches within the given container.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - where - Filters the results to return only blobs whose tags match the specified expression.
+// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
+func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
+	var err error
+	req, err := client.filterBlobsCreateRequest(ctx, where, options)
+	if err != nil {
+		return ContainerClientFilterBlobsResponse{}, err
+	}
+	httpResp, err := client.internal.Pipeline().Do(req)
+	if err != nil {
+		return ContainerClientFilterBlobsResponse{}, err
+	}
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientFilterBlobsResponse{}, err
+	}
+	resp, err := client.filterBlobsHandleResponse(httpResp)
+	return resp, err
+}
+
+// filterBlobsCreateRequest creates the FilterBlobs request.
+func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "container")
+	reqQP.Set("comp", "blobs")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	reqQP.Set("where", where)
+	if options != nil && options.Marker != nil {
+		reqQP.Set("marker", *options.Marker)
+	}
+	if options != nil && options.Maxresults != nil {
+		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
+	}
+	if options != nil && options.Include != nil {
+		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// filterBlobsHandleResponse handles the FilterBlobs response.
+func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) {
+	result := ContainerClientFilterBlobsResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientFilterBlobsResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
+		return ContainerClientFilterBlobsResponse{}, err
+	}
+	return result, nil
+}
+
+// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may
+// be accessed publicly.
+// If the operation fails it returns an *azcore.ResponseError type.
+// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error + req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err + } + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
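+// A minimal sketch of GetAccessPolicy; per the handler below, BlobPublicAccess
+// is only set when the service returned the x-ms-blob-public-access header:
+//
+//	resp, err := client.GetAccessPolicy(ctx, nil, nil)
+//	if err == nil && resp.BlobPublicAccess != nil {
+//		level := *resp.BlobPublicAccess // PublicAccessTypeBlob or PublicAccessTypeContainer
+//		_ = level
+//	}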
+func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerClientGetAccessPolicyResponse, error) { + result := ContainerClientGetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo +// method. +func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
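+// A minimal sketch of GetAccountInfo, reading the SKU and account kind parsed
+// from the x-ms-sku-name and x-ms-account-kind response headers:
+//
+//	resp, err := client.GetAccountInfo(ctx, nil)
+//	if err == nil && resp.SKUName != nil && resp.AccountKind != nil {
+//		fmt.Printf("sku=%s kind=%s\n", *resp.SKUName, *resp.AccountKind)
+//	}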
+func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { + result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
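GetProperties follows the same pattern; its response carries lease details plus the metadata map that the response handler below folds out of the x-ms-meta-* headers. A sketch under the same assumptions (client, context, and fmt assumed):

func inspectContainer(ctx context.Context, client *ContainerClient) error {
	resp, err := client.GetProperties(ctx, nil, nil)
	if err != nil {
		return err
	}
	if resp.LeaseState != nil {
		fmt.Println("lease state:", *resp.LeaseState)
	}
	// Metadata is reconstructed from individual x-ms-meta-* response headers.
	for k, v := range resp.Metadata {
		if v != nil {
			fmt.Printf("meta %s=%s\n", k, *v)
		}
	}
	return nil
}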
+func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { + result := ContainerClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { + hasImmutabilityPolicy, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasImmutabilityPolicy = &hasImmutabilityPolicy + } + if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { + hasLegalHold, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.HasLegalHold = &hasLegalHold + } + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// Generated from API version 2023-11-03 +// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +// +// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. 
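The x-ms-meta- loop in getPropertiesHandleResponse above has one subtlety: net/http canonicalizes header keys (X-Ms-Meta-...), hence the EqualFold comparison instead of a plain prefix match. A self-contained illustration of that prefix-scan technique, independent of the generated types:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// collectPrefixed gathers headers under a case-insensitive prefix and strips
// the prefix from the key, mirroring the metadata loop above.
func collectPrefixed(h http.Header, prefix string) map[string]string {
	out := map[string]string{}
	for k := range h {
		if len(k) > len(prefix) && strings.EqualFold(k[:len(prefix)], prefix) {
			out[k[len(prefix):]] = h.Get(k)
		}
	}
	return out
}

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-owner", "keda") // stored canonically as X-Ms-Meta-Owner
	fmt.Println(collectPrefixed(h, "x-ms-meta-")) // map[Owner:keda]
}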
+func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerClientListBlobFlatSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. +func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { + result := ContainerClientListBlobFlatSegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { + return ContainerClientListBlobFlatSegmentResponse{}, err + } + return result, nil +} + +// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// Generated from API version 2023-11-03 +// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. +// - options - ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. 
+func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) *runtime.Pager[ContainerClientListBlobHierarchySegmentResponse] { + return runtime.NewPager(runtime.PagingHandler[ContainerClientListBlobHierarchySegmentResponse]{ + More: func(page ContainerClientListBlobHierarchySegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + return client.ListBlobHierarchySegmentHandleResponse(resp) + }, + }) +} + +// ListBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. +func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("delimiter", delimiter) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. 
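The pager above is driven by azcore's runtime.Pager: More reports whether a NextMarker remains, and the fetcher re-issues the request with that marker. A minimal consumption sketch (client, context, and fmt assumed, as before):

func listTopLevel(ctx context.Context, client *ContainerClient) error {
	// "/" as delimiter groups blobs into directory-like BlobPrefix entries.
	pager := client.NewListBlobHierarchySegmentPager("/", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, p := range page.Segment.BlobPrefixes {
			fmt.Println("prefix:", *p.Name)
		}
		for _, b := range page.Segment.BlobItems {
			fmt.Println("blob:", *b.Name)
		}
	}
	return nil
}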
+func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { + result := ContainerClientListBlobHierarchySegmentResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { + return ContainerClientListBlobHierarchySegmentResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err + } + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
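ReleaseLease has the same shape as the other lease verbs: the lease ID is the only required argument, while options and modified-access conditions may be nil, or set to make the release conditional. A sketch under the usual assumptions (client, context, and time imports assumed; the ModifiedAccessConditions field names are those consumed by releaseLeaseCreateRequest below):

func releaseIfUntouched(ctx context.Context, client *ContainerClient, leaseID string, since time.Time) error {
	// Release only if the container is unmodified since the given instant;
	// the request builder renders this as an If-Unmodified-Since header.
	cond := &ModifiedAccessConditions{IfUnmodifiedSince: &since}
	_, err := client.ReleaseLease(ctx, leaseID, nil, cond)
	return err
}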
+func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { + result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Rename - Renames an existing container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - sourceContainerName - Required. Specifies the name of the container to rename. +// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
+func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error + req, err := client.renameCreateRequest(ctx, sourceContainerName, options) + if err != nil { + return ContainerClientRenameResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRenameResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err + } + resp, err := client.renameHandleResponse(httpResp) + return resp, err +} + +// renameCreateRequest creates the Rename request. +func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "rename") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-source-container-name"] = []string{sourceContainerName} + if options != nil && options.SourceLeaseID != nil { + req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renameHandleResponse handles the Rename response. +func (client *ContainerClient) renameHandleResponse(resp *http.Response) (ContainerClientRenameResponse, error) { + result := ContainerClientRenameResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenameResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error + req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err + } + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. +func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { + result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Restore - Restores a previously-deleted container. +// If the operation fails it returns an *azcore.ResponseError type. 
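Those conditional headers are rendered through a package-level gmt location defined in a sibling generated file. A self-contained equivalent of the formatting step, assuming gmt is the fixed-offset GMT zone: RFC 7231 dates must carry the literal "GMT" suffix, which time.UTC under RFC1123 would print as "UTC" instead.

package main

import (
	"fmt"
	"time"
)

func main() {
	gmt := time.FixedZone("GMT", 0) // assumed equivalent of the package-level gmt
	t := time.Date(2024, time.July, 1, 12, 0, 0, 0, time.UTC)
	// Same rendering as (*t).In(gmt).Format(time.RFC1123) in the builders above.
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Mon, 01 Jul 2024 12:00:00 GMT
}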
+// +// Generated from API version 2023-11-03 +// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err + } + resp, err := client.restoreHandleResponse(httpResp) + return resp, err +} + +// restoreCreateRequest creates the Restore request. +func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options *ContainerClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeletedContainerName != nil { + req.Raw().Header["x-ms-deleted-container-name"] = []string{*options.DeletedContainerName} + } + if options != nil && options.DeletedContainerVersion != nil { + req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// restoreHandleResponse handles the Restore response. +func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (ContainerClientRestoreResponse, error) { + result := ContainerClientRestoreResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRestoreResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container +// may be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - containerACL - the acls for the container +// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
+func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error + req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err + } + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. +func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
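setAccessPolicyCreateRequest above serializes the ACL through an anonymous wrapper struct whose XMLName forces a SignedIdentifiers root element around repeated SignedIdentifier children. The same encoding/xml technique in isolation, using a trimmed stand-in for the real SignedIdentifier model:

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed stand-in for the generated SignedIdentifier model.
type SignedIdentifier struct {
	ID *string `xml:"Id"`
}

// wrapper pins the root element name, exactly as setAccessPolicyCreateRequest
// does before handing the value to runtime.MarshalAsXML.
type wrapper struct {
	XMLName      xml.Name             `xml:"SignedIdentifiers"`
	ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	id := "read-policy"
	acl := []*SignedIdentifier{{ID: &id}}
	out, err := xml.Marshal(wrapper{ContainerACL: &acl})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <SignedIdentifiers><SignedIdentifier><Id>read-policy</Id></SignedIdentifier></SignedIdentifiers>
}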
+func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { + result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err + } + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err +} + +// setMetadataCreateRequest creates the SetMetadata request. 
+func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { + result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. 
+func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err + } + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ContainerClient) submitBatchHandleResponse(resp *http.Response) (ContainerClientSubmitBatchResponse, error) { + result := ContainerClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go new file mode 100644 index 00000000000..7251de83952 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -0,0 +1,544 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. +type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItem `xml:"Blob"` +} + +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItem `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// BlobItem - An Azure Storage blob +type BlobItem struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobProperties `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +type BlobName struct { + // The name of the blob. + Content *string `xml:",chardata"` + + // Indicates if the blob name is encoded. + Encoded *bool `xml:"Encoded,attr"` +} + +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobProperties - Properties of a blob +type BlobProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + AccessTier *AccessTier `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. 
+ EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. + RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *ContainerProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. 
Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CORSRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. +type DelimitedTextConfiguration struct { + // The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + + // The string used as an escape character. + EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` + VersionID *string `xml:"VersionId"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. 
+ RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. 
+ ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // parquet configuration + ParquetTextConfiguration any `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // CONSTANT; Required. The type of the provided query expression. + // Field has constant value "SQL", any specified value is ignored. + QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +type StorageError struct { + Message *string +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. 
+ Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. + SignedOID *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTID *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go new file mode 100644 index 00000000000..7e094db8730 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -0,0 +1,472 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. 
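The AccessPolicy marshallers above use the generated-code idiom for custom time formats: a local alias type strips the method set (preventing infinite recursion), and an aux struct shadows the time fields with an unexported wrapper type (dateTimeRFC3339, defined in a sibling generated file). A self-contained reduction of the same pattern, with rfc3339 playing the wrapper's role:

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// rfc3339 stands in for the generated dateTimeRFC3339 wrapper.
type rfc3339 time.Time

func (t rfc3339) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(time.Time(t).Format(time.RFC3339), start)
}

type Policy struct {
	Expiry *time.Time `xml:"Expiry"`
}

func (p Policy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	type alias Policy // alias drops MarshalXML, preventing infinite recursion
	aux := &struct {
		*alias
		Expiry *rfc3339 `xml:"Expiry"` // shadows the promoted time field
	}{alias: (*alias)(&p), Expiry: (*rfc3339)(p.Expiry)}
	return enc.EncodeElement(aux, start)
}

func main() {
	exp := time.Date(2024, time.July, 1, 0, 0, 0, 0, time.UTC)
	out, err := xml.Marshal(Policy{Expiry: &exp})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <Policy><Expiry>2024-07-01T00:00:00Z</Expiry></Policy>
}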
+func (a ArrowConfiguration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItem `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. +func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItem `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlobProperties. +func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlobProperties + aux := &struct { + *alias + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) + aux.ContentMD5 = &encodedContentMD5 + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobProperties. 
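+
+// ---------------------------------------------------------------------------
+// Editorial aside (illustrative sketch, not part of the generated code):
+// ContentMD5 is []byte in the model but travels as base64 text in XML, so the
+// aux struct swaps it for a *string; MarshalXML above encodes it with
+// runtime.EncodeByteArray and UnmarshalXML below reverses it with
+// runtime.DecodeByteArray. The azcore/runtime round-trip behaves like this:
+//
+//	md5 := []byte{0xde, 0xad, 0xbe, 0xef}
+//	encoded := runtime.EncodeByteArray(md5, runtime.Base64StdFormat) // "3q2+7w=="
+//
+//	var decoded []byte
+//	if err := runtime.DecodeByteArray(encoded, &decoded, runtime.Base64StdFormat); err != nil {
+//		// malformed base64 surfaces here rather than as a corrupted hash
+//	}
+// ---------------------------------------------------------------------------
+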
+func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobProperties + aux := &struct { + *alias + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.ContentMD5 != nil { + if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { + return err + } + } + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. +func (b BlockList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. +func (b BlockLookupList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. +func (c *ContainerItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ContainerItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. 
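+
+// ---------------------------------------------------------------------------
+// Editorial aside (illustrative sketch, not part of the generated code):
+// user-defined metadata arrives as arbitrarily named child elements, which
+// static `xml:"..."` struct tags cannot express, so ContainerItem above
+// decodes <Metadata> through the package's additionalProperties helper and
+// converts the result to map[string]*string. Roughly, a payload such as
+//
+//	<Metadata><owner>team-a</owner><env>prod</env></Metadata>
+//
+// comes back as the equivalent of:
+//
+//	owner, env := "team-a", "prod"
+//	_ = map[string]*string{"owner": &owner, "env": &env}
+// ---------------------------------------------------------------------------
+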
+func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. +func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + c.DeletedTime = (*time.Time)(aux.DeletedTime) + c.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. +func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. 
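+
+// ---------------------------------------------------------------------------
+// Editorial aside (illustrative sketch, not part of the generated code): the
+// method below pins the root element name with start.Name.Local (the same
+// trick BlobTags and BlockLookupList use above), so the wire name stays
+// stable however the type is embedded. Because the serialization fields are
+// nil pointers when unset, encoding/xml simply omits them:
+//
+//	sql, qt := "SELECT * FROM BlobStorage", "SQL"
+//	qr := QueryRequest{Expression: &sql, QueryType: &qt}
+//	out, _ := xml.Marshal(qr)
+//	// out is roughly:
+//	// <QueryRequest><Expression>SELECT * FROM BlobStorage</Expression><QueryType>SQL</QueryType></QueryRequest>
+// ---------------------------------------------------------------------------
+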
+func (q QueryRequest) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return enc.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. +func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. 
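+
+// ---------------------------------------------------------------------------
+// Editorial aside (illustrative sketch, not part of the generated code):
+// decoding StorageError above through map[string]json.RawMessage and
+// switching on each key lets the struct tolerate fields it does not know
+// about, which keeps the generated code forward compatible with newer
+// service payloads. For example:
+//
+//	var se StorageError
+//	payload := []byte(`{"Message":"container not found","Code":"ContainerNotFound"}`)
+//	if err := json.Unmarshal(payload, &se); err != nil {
+//		// handle malformed JSON
+//	}
+//	// *se.Message == "container not found"; the unknown "Code" key is ignored.
+// ---------------------------------------------------------------------------
+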
+func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + u.SignedStart = (*time.Time)(aux.SignedStart) + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 00000000000..216f8b73ae9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1469 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. 
If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. 
Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
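+
+// ---------------------------------------------------------------------------
+// Editorial aside (illustrative sketch, not part of the generated code):
+// every field in these Options structs is a pointer (or slice) so that the
+// zero value means "not set" and the service default applies; callers opt in
+// per field, per call. For example, deleting a container with a 30-second
+// server-side timeout while leaving the client request ID unset:
+//
+//	timeout := int32(30)
+//	opts := ContainerClientDeleteOptions{Timeout: &timeout} // RequestID stays nil
+// ---------------------------------------------------------------------------
+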
+type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. 
Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. 
Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. 
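+// Editor's note (not generated code): a hedged sketch of filling in the options
+// below; the sequence number and metadata values are assumptions.
+//
+//	seq := int64(0) // must lie between 0 and 2^63 - 1
+//	env := "dev"
+//	opts := PageBlobClientCreateOptions{
+//		BlobSequenceNumber: &seq,
+//		Metadata:           map[string]*string{"env": &env},
+//	}
+//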
+type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + + // Optional in version 2015-07-08 and newer. 
The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
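+// Editor's note (not generated code): in the page-range option types above,
+// Prevsnapshot (a snapshot timestamp) and PrevSnapshotURL (a URL, service
+// versions 2019-04-19 and later) are alternative ways to name the baseline
+// snapshot for a diff. A hedged sketch with an assumed timestamp:
+//
+//	prev := "2024-01-01T00:00:00.0000000Z"
+//	diffOpts := PageBlobClientGetPageRangesDiffOptions{Prevsnapshot: &prev}
+//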
+type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. 
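+// Editor's note (not generated code): a hedged sketch of guarding a page write
+// on the blob's current sequence number; the value 7 is an assumption.
+//
+//	seq := int64(7)
+//	conds := SequenceNumberAccessConditions{IfSequenceNumberEqualTo: &seq}
+//	// The service rejects the write unless the blob's sequence number is exactly 7.
+//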
+type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
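+// Editor's note (not generated code): with ServiceClientListContainersSegmentOptions
+// above, each response's NextMarker is fed back as Marker to fetch the next page.
+// A hedged sketch with assumed prefix and page-size values:
+//
+//	prefix := "logs-"
+//	maxResults := int32(500)
+//	opts := ServiceClientListContainersSegmentOptions{Prefix: &prefix, Maxresults: &maxResults}
+//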
+type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go new file mode 100644 index 00000000000..cb6a19f7a33 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -0,0 +1,1295 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +// PageBlobClient contains the methods for the PageBlob group. +// Don't use this type directly, use a constructor function instead. +type PageBlobClient struct { + internal *azcore.Client + endpoint string +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
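+//
+// Editor's note (not generated code): a hedged usage sketch; the 512-byte range
+// is an assumption, the content length is 0 for a clear operation, and client
+// and ctx are assumed to be in scope.
+//
+//	rng := "bytes=0-511"
+//	opts := PageBlobClientClearPagesOptions{Range: &rng}
+//	_, err := client.ClearPages(ctx, 0, &opts, nil, nil, nil, nil, nil)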
+func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error + req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err + } + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err +} + +// clearPagesCreateRequest creates the ClearPages request. +func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = 
[]string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { + result := PageBlobClientClearPagesResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// - options - PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error + req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// copyIncrementalHandleResponse handles the CopyIncremental response. 
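+// Editor's note (not generated code): a hedged sketch of invoking CopyIncremental
+// above; the SAS-authenticated snapshot URL is an assumption.
+//
+//	src := "https://account.blob.core.windows.net/cont/blob?snapshot=...&sv=..."
+//	resp, err := client.CopyIncremental(ctx, src, nil, nil)
+//	// resp.CopyStatus typically starts as pending; poll the destination blob for completion.
+//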
+func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { + result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// Create - The Create operation creates a new page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err + } + resp, err := client.createHandleResponse(httpResp) + return resp, err +} + +// createCreateRequest creates the Create request. 
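+// Editor's note (not generated code): a hedged sketch of calling Create above;
+// the 1 MiB blob size is an assumption and must be 512-byte aligned, and the
+// request body length is 0.
+//
+//	_, err := client.Create(ctx, 0, 1024*1024, nil, nil, nil, nil, nil, nil)
+//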
+func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} 
+ } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { + result := PageBlobClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + return result, nil +} + +// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot +// of a page blob +// +// Generated from API version 2023-11-03 +// - options - 
PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesResponse]{ + More: func(page PageBlobClientGetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return client.GetPageRangesHandleResponse(resp) + }, + }) +} + +// GetPageRangesCreateRequest creates the GetPageRanges request. +func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != 
nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesHandleResponse handles the GetPageRanges response. +func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { + result := PageBlobClientGetPageRangesResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that +// were changed between target blob and previous snapshot. +// +// Generated from API version 2023-11-03 +// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
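+//
+// A minimal paging sketch (illustrative only, not part of the generated code;
+// the snapshot value, the client construction, and the PageList.PageRange field
+// access are assumptions based on the surrounding generated models):
+//
+//	prev := "2023-11-03T00:00:00.0000000Z"
+//	pager := client.NewGetPageRangesDiffPager(&PageBlobClientGetPageRangesDiffOptions{Prevsnapshot: &prev}, nil, nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		for _, pr := range page.PageList.PageRange {
+//			_ = pr // each entry is a page range changed since the previous snapshot
+//		}
+//	}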
+func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *runtime.Pager[PageBlobClientGetPageRangesDiffResponse] { + return runtime.NewPager(runtime.PagingHandler[PageBlobClientGetPageRangesDiffResponse]{ + More: func(page PageBlobClientGetPageRangesDiffResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { + nextLink := "" + if page != nil { + nextLink = *page.NextMarker + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return client.GetPageRangesDiffHandleResponse(resp) + }, + }) +} + +// GetPageRangesDiffCreateRequest creates the GetPageRangesDiff request. +func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context, options *PageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.PrevSnapshotURL != nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = 
[]string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. +func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { + result := PageBlobClientGetPageRangesDiffResponse{} + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
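+//
+// A minimal call sketch (illustrative only; the 2 MiB target size is an
+// assumption and must stay 512-byte aligned, per the parameter doc above):
+//
+//	resp, err := client.Resize(ctx, 2*1024*1024, nil, nil, nil, nil, nil)
+//	if err != nil {
+//		// handle the *azcore.ResponseError
+//	}
+//	_ = resp.BlobSequenceNumber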
+func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error + req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err + } + resp, err := client.resizeHandleResponse(httpResp) + return resp, err +} + +// resizeCreateRequest creates the Resize request. +func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// resizeHandleResponse handles the Resize response. +func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { + result := PageBlobClientResizeResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
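+// The request reuses the comp=properties endpoint and is distinguished by the
+// x-ms-sequence-number-action header; for the "update" and "max" actions the
+// x-ms-blob-sequence-number header (options.BlobSequenceNumber) carries the
+// target value, while "increment" takes none (a service-side rule, noted here
+// for orientation rather than taken from this file). A minimal sketch:
+//
+//	seq := int64(7)
+//	_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionTypeUpdate,
+//		&PageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}, nil, nil)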
+func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
+func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { + result := PageBlobClientUpdateSequenceNumberResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - body - Initial data +// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err + } + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err +} + +// uploadPagesCreateRequest creates the UploadPages request. 
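+//
+// A minimal call sketch for the public UploadPages wrapper above (illustrative
+// only; bytes.NewReader wrapped in streaming.NopCloser from azcore/streaming is
+// one assumed way to satisfy io.ReadSeekCloser, and the 512-byte range is an
+// assumption):
+//
+//	buf := make([]byte, 512) // page writes must cover 512-byte-aligned ranges
+//	rng := "bytes=0-511"
+//	_, err := client.UploadPages(ctx, int64(len(buf)),
+//		streaming.NopCloser(bytes.NewReader(buf)),
+//		&PageBlobClientUploadPagesOptions{Range: &rng}, nil, nil, nil, nil, nil)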
+func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.TransactionalContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { + result := PageBlobClientUploadPagesResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - sourceURL - Specify a URL to the copy source. +// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. +// - contentLength - The length of the request. +// - rangeParam - The range of bytes to which the source range would be written. 
The range should be 512 aligned and range-end +// is required. +// - options - PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL +// method. +// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +// - SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages +// method. +// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL +// method. +func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
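+//
+// A minimal call sketch for the public UploadPagesFromURL wrapper above
+// (illustrative only; the SAS-authorized source URL and the 512-byte source and
+// destination offsets are assumptions):
+//
+//	src := "https://account.blob.core.windows.net/src/disk.vhd?<sas>"
+//	_, err := client.UploadPagesFromURL(ctx, src, "bytes=0-511", 512, "bytes=0-511",
+//		nil, nil, nil, nil, nil, nil, nil)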
+func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + } + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if 
modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
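+// Like the other response handlers in this file, it base64-decodes the
+// Content-MD5 and x-ms-content-crc64 headers, parses the RFC1123 date headers,
+// and copies the remaining x-ms-* headers into a typed
+// PageBlobClientUploadPagesFromURLResponse.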
+func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { + result := PageBlobClientUploadPagesFromURLResponse{} + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go new file mode 100644 index 00000000000..738d23c8f19 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -0,0 +1,2016 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. 
+ ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. 
+ ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. 
+ Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. 
+ ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. 
+ Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { + // Blob tags + BlobTags + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { + BlockList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. +type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs + ListBlobsFlatSegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. +type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs + ListBlobsHierarchySegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. +type ContainerClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. +type ContainerClientRenameResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. +type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. +type PageBlobClientGetPageRangesResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. + StorageServiceProperties + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. + StorageServiceStats + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key + UserDelegationKey + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. +type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers + ListContainersSegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. +type ServiceClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go new file mode 100644 index 00000000000..c792fbf094b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -0,0 +1,580 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+package generated
+
+import (
+ "context"
+ "fmt"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// ServiceClient contains the methods for the Service group.
+// Don't use this type directly, use a constructor function instead.
+type ServiceClient struct {
+ internal *azcore.Client
+ endpoint string
+}
+
+// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search
+// expression. Filter blobs searches across all containers within a storage account but can
+// be scoped within the expression to a single container.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2023-11-03
+// - where - Filters the results to return only blobs whose tags match the specified expression.
+// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
+func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
+ var err error
+ req, err := client.filterBlobsCreateRequest(ctx, where, options)
+ if err != nil {
+ return ServiceClientFilterBlobsResponse{}, err
+ }
+ httpResp, err := client.internal.Pipeline().Do(req)
+ if err != nil {
+ return ServiceClientFilterBlobsResponse{}, err
+ }
+ if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+ err = runtime.NewResponseError(httpResp)
+ return ServiceClientFilterBlobsResponse{}, err
+ }
+ resp, err := client.filterBlobsHandleResponse(httpResp)
+ return resp, err
+}
+
+// filterBlobsCreateRequest creates the FilterBlobs request.
+func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("comp", "blobs")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ reqQP.Set("where", where)
+ if options != nil && options.Marker != nil {
+ reqQP.Set("marker", *options.Marker)
+ }
+ if options != nil && options.Maxresults != nil {
+ reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
+ }
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// filterBlobsHandleResponse handles the FilterBlobs response.
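+// It copies the well-known response headers (x-ms-client-request-id, Date, x-ms-request-id,
+// x-ms-version) into the result and unmarshals the XML body into the embedded FilterBlobSegment.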
+func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) { + result := ServiceClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ServiceClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err + } + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
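+// This operation returns no response body; every field of the result, including AccountKind
+// and SKUName, is parsed from response headers.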
+func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { + result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ServiceClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err + } + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err + } + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. 
+func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - keyInfo - Key information +// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey +// method. +func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error + req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientGetUserDelegationKeyResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err + } + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err +} + +// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. +func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "userdelegationkey") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil +} + +// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
+func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) {
+ result := ServiceClientGetUserDelegationKeyResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return ServiceClientGetUserDelegationKeyResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil {
+ return ServiceClientGetUserDelegationKeyResponse{}, err
+ }
+ return result, nil
+}
+
+// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
+// account
+//
+// Generated from API version 2023-11-03
+// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
+// method.
+//
+// ListContainersSegmentCreateRequest creates the ListContainersSegment request.
+func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("comp", "list")
+ if options != nil && options.Prefix != nil {
+ reqQP.Set("prefix", *options.Prefix)
+ }
+ if options != nil && options.Marker != nil {
+ reqQP.Set("marker", *options.Marker)
+ }
+ if options != nil && options.Maxresults != nil {
+ reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
+ }
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ }
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ return req, nil
+}
+
+// ListContainersSegmentHandleResponse handles the ListContainersSegment response.
+func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) {
+ result := ServiceClientListContainersSegmentResponse{}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil {
+ return ServiceClientListContainersSegmentResponse{}, err
+ }
+ return result, nil
+}
+
+// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules
+// If the operation fails it returns an *azcore.ResponseError type.
+// +// Generated from API version 2023-11-03 +// - storageServiceProperties - The StorageService properties. +// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err + } + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err +} + +// setPropertiesCreateRequest creates the SetProperties request. +func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-03 +// - contentLength - The length of the request. +// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// - body - Initial data +// - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
+func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ServiceClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err + } + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + req.Raw().Header["Content-Type"] = []string{multipartContentType} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) { + result := ServiceClientSubmitBatchResponse{Body: resp.Body} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go new file mode 100644 index 00000000000..58665032972 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,42 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package generated + +import ( + "strings" + "time" +) + +const ( + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` +) + +type dateTimeRFC1123 time.Time + +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) + return b, nil +} + +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) + return err +} + +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = dateTimeRFC1123(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go new file mode 100644 index 00000000000..82b370133fa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -0,0 +1,58 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "regexp" + "strings" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time + +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON + if tzOffsetRegex.Match(data) { + layout = dateTimeJSON + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *dateTimeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = dateTimeRFC3339(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go new file mode 100644 index 00000000000..1bd0e4de05a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+package generated
+
+import (
+ "encoding/xml"
+ "errors"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+ "io"
+ "strings"
+)
+
+type additionalProperties map[string]*string
+
+// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties.
+func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ tokName := ""
+ tokValue := ""
+ for {
+ t, err := d.Token()
+ if errors.Is(err, io.EOF) {
+ break
+ } else if err != nil {
+ return err
+ }
+ switch tt := t.(type) {
+ case xml.StartElement:
+ tokName = strings.ToLower(tt.Name.Local)
+ tokValue = ""
+ case xml.CharData:
+ if tokName == "" {
+ continue
+ }
+ tokValue = string(tt)
+ case xml.EndElement:
+ if tokName == "" {
+ continue
+ }
+ if *ap == nil {
+ *ap = additionalProperties{}
+ }
+ (*ap)[tokName] = to.Ptr(tokValue)
+ tokName = ""
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
new file mode 100644
index 00000000000..9f95ad8ae43
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
@@ -0,0 +1,81 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package shared
+
+import (
+ "context"
+ "errors"
+)
+
+const (
+ DefaultConcurrency = 5
+)
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+ TransferSize int64
+ ChunkSize int64
+ NumChunks uint64
+ Concurrency uint16
+ Operation func(ctx context.Context, offset int64, chunkSize int64) error
+ OperationName string
+}
+
+// DoBatchTransfer helps to execute operations in a batch manner.
+// It can be used to customize batch work for scenarios that the SDK does not cover out of the box.
+func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
+ if o.ChunkSize == 0 {
+ return errors.New("ChunkSize cannot be 0")
+ }
+
+ if o.Concurrency == 0 {
+ o.Concurrency = DefaultConcurrency // default concurrency
+ }
+
+ // Prepare and do parallel operations.
+ operationChannel := make(chan func() error, o.Concurrency) // Create the channel that releases 'concurrency' goroutines concurrently
+ operationResponseChannel := make(chan error, o.NumChunks) // Holds each response
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Create the goroutines that process each operation (in parallel).
+ for g := uint16(0); g < o.Concurrency; g++ {
+ go func() {
+ for f := range operationChannel {
+ err := f()
+ operationResponseChannel <- err
+ }
+ }()
+ }
+
+ // Add each chunk's operation to the channel.
+ for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
+ curChunkSize := o.ChunkSize
+
+ if chunkNum == o.NumChunks-1 { // Last chunk
+ curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
+ }
+ offset := int64(chunkNum) * o.ChunkSize
+ operationChannel <- func() error {
+ return o.Operation(ctx, offset, curChunkSize)
+ }
+ }
+ close(operationChannel)
+
+ // Wait for the operations to complete.
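+ // Each queued operation sends exactly one result, so receiving NumChunks results guarantees
+ // that every worker goroutine has finished. The first failure cancels the shared context,
+ // which makes the remaining in-flight operations fail fast; their errors are drained below
+ // but not recorded.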
+ var firstErr error
+ for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
+ responseError := <-operationResponseChannel
+ // record the first error (the original error which should cause the other chunks to fail with canceled context)
+ if responseError != nil && firstErr == nil {
+ cancel() // As soon as any operation fails, cancel all remaining operation calls
+ firstErr = responseError
+ }
+ }
+ return firstErr
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go
new file mode 100644
index 00000000000..e3aa4a4886d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go
@@ -0,0 +1,70 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package shared
+
+type BufferManager[T ~[]byte] interface {
+ // Acquire returns the channel that contains the pool of buffers.
+ Acquire() <-chan T
+
+ // Release releases the buffer back to the pool for reuse/cleanup.
+ Release(T)
+
+ // Grow grows the number of buffers, up to the predefined max.
+ // It returns the total number of buffers or an error.
+ // No error is returned if the number of buffers has reached max.
+ // This is called only from the reading goroutine.
+ Grow() (int, error)
+
+ // Free cleans up all buffers.
+ Free()
+}
+
+// mmbPool implements the BufferManager interface.
+// It uses anonymous memory mapped files for buffers.
+// Don't use this type directly, use NewMMBPool() instead.
+type mmbPool struct {
+ buffers chan Mmb
+ count int
+ max int
+ size int64
+}
+
+func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] {
+ return &mmbPool{
+ buffers: make(chan Mmb, maxBuffers),
+ max: maxBuffers,
+ size: bufferSize,
+ }
+}
+
+func (pool *mmbPool) Acquire() <-chan Mmb {
+ return pool.buffers
+}
+
+func (pool *mmbPool) Grow() (int, error) {
+ if pool.count < pool.max {
+ buffer, err := NewMMB(pool.size)
+ if err != nil {
+ return 0, err
+ }
+ pool.buffers <- buffer
+ pool.count++
+ }
+ return pool.count, nil
+}
+
+func (pool *mmbPool) Release(buffer Mmb) {
+ pool.buffers <- buffer
+}
+
+func (pool *mmbPool) Free() {
+ for i := 0; i < pool.count; i++ {
+ buffer := <-pool.buffers
+ buffer.Delete()
+ }
+ pool.count = 0
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go
new file mode 100644
index 00000000000..8d4d35bdeff
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/bytes_writer.go
@@ -0,0 +1,30 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package shared + +import ( + "errors" +) + +type bytesWriter []byte + +func NewBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go new file mode 100644 index 00000000000..fff61016c85 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -0,0 +1,115 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +type storageAuthorizer struct { + scopes []string + tenantID string +} + +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: s.onRequest, + OnChallenge: s.onChallenge, + }, + InsecureAllowCredentialWithHTTP: allowHTTP, + }) +} + +func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + err := s.parseChallenge(resp) + if err != nil { + return err + } + // TODO: Set tenantID when policy.TokenRequestOptions supports it. 
https://github.com/Azure/azure-sdk-for-go/issues/19841
+ return authNZ(policy.TokenRequestOptions{Scopes: s.scopes})
+}
+
+type challengePolicyError struct {
+ err error
+}
+
+func (c *challengePolicyError) Error() string {
+ return c.err.Error()
+}
+
+func (*challengePolicyError) NonRetriable() {
+ // marker method
+}
+
+func (c *challengePolicyError) Unwrap() error {
+ return c.err
+}
+
+// parses Tenant ID from auth challenge
+// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize
+func parseTenant(url string) string {
+ if url == "" {
+ return ""
+ }
+ parts := strings.Split(url, "/")
+ // parts[3] holds the tenant ID, so the guard must require at least four elements.
+ if len(parts) >= 4 {
+ tenant := parts[3]
+ tenant = strings.ReplaceAll(tenant, ",", "")
+ return tenant
+ } else {
+ return ""
+ }
+}
+
+func (s *storageAuthorizer) parseChallenge(resp *http.Response) error {
+ authHeader := resp.Header.Get("WWW-Authenticate")
+ if authHeader == "" {
+ return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")}
+ }
+
+ // Strip down to auth and resource
+ // Format is "Bearer authorization_uri=\"\" resource_id=\"\""
+ authHeader = strings.ReplaceAll(authHeader, "Bearer ", "")
+
+ parts := strings.Split(authHeader, " ")
+
+ vals := map[string]string{}
+ for _, part := range parts {
+ subParts := strings.Split(part, "=")
+ if len(subParts) == 2 {
+ stripped := strings.ReplaceAll(subParts[1], "\"", "")
+ stripped = strings.TrimSuffix(stripped, ",")
+ vals[subParts[0]] = stripped
+ }
+ }
+
+ s.tenantID = parseTenant(vals["authorization_uri"])
+
+ scope := vals["resource_id"]
+ if scope == "" {
+ return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")}
+ }
+
+ if !strings.HasSuffix(scope, "/.default") {
+ scope += "/.default"
+ }
+ s.scopes = []string{scope}
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go
new file mode 100644
index 00000000000..cdcadf31160
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go
@@ -0,0 +1,38 @@
+//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix)
+// +build go1.18
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package shared
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// Mmb is a memory mapped buffer
+type Mmb []byte
+
+// NewMMB creates a new memory mapped buffer with the specified size
+func NewMMB(size int64) (Mmb, error) {
+ prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE
+ addr, err := syscall.Mmap(-1, 0, int(size), prot, flags)
+ if err != nil {
+ return nil, os.NewSyscallError("Mmap", err)
+ }
+ return Mmb(addr), nil
+}
+
+// Delete cleans up the memory mapped buffer
+func (m *Mmb) Delete() {
+ err := syscall.Munmap(*m)
+ *m = nil
+ if err != nil {
+ // if we get here, there is likely memory corruption.
+ // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("Munmap error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go new file mode 100644 index 00000000000..ef9fdc2a1f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -0,0 +1,56 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "fmt" + "os" + "reflect" + "syscall" + "unsafe" +) + +// Mmb is a memory mapped buffer +type Mmb []byte + +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { + const InvalidHandleValue = ^uintptr(0) // -1 + + prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + defer func() { + _ = syscall.CloseHandle(hMMF) + }() + + addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) + if err != nil { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + m := Mmb{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = int(size) + h.Cap = h.Len + return m, nil +} + +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = Mmb{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go new file mode 100644 index 00000000000..c8528a2e3ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go new file mode 100644 index 00000000000..c7922076f3c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "fmt" + "hash/crc64" + "io" + "net" + "net/url" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" + HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" +) + +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. 
For example 'DefaultEndpointsProtocol=https;AccountName=;" +
+ "AccountKey=;EndpointSuffix=core.windows.net'")
+
+type ParsedConnectionString struct {
+ ServiceURL string
+ AccountName string
+ AccountKey string
+}
+
+func ParseConnectionString(connectionString string) (ParsedConnectionString, error) {
+ const (
+ defaultScheme = "https"
+ defaultSuffix = "core.windows.net"
+ )
+
+ connStrMap := make(map[string]string)
+ connectionString = strings.TrimRight(connectionString, ";")
+
+ splitString := strings.Split(connectionString, ";")
+ if len(splitString) == 0 {
+ return ParsedConnectionString{}, errConnectionString
+ }
+ for _, stringPart := range splitString {
+ parts := strings.SplitN(stringPart, "=", 2)
+ if len(parts) != 2 {
+ return ParsedConnectionString{}, errConnectionString
+ }
+ connStrMap[parts[0]] = parts[1]
+ }
+
+ protocol, ok := connStrMap["DefaultEndpointsProtocol"]
+ if !ok {
+ protocol = defaultScheme
+ }
+
+ suffix, ok := connStrMap["EndpointSuffix"]
+ if !ok {
+ suffix = defaultSuffix
+ }
+
+ blobEndpoint, hasBlobEndpoint := connStrMap["BlobEndpoint"]
+ accountName, hasAccountName := connStrMap["AccountName"]
+
+ var serviceURL string
+ if hasBlobEndpoint {
+ serviceURL = blobEndpoint
+ } else if hasAccountName {
+ serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix)
+ } else {
+ return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint")
+ }
+
+ if !strings.HasSuffix(serviceURL, "/") {
+ // add a trailing slash to be consistent with the portal
+ serviceURL += "/"
+ }
+
+ accountKey, hasAccountKey := connStrMap["AccountKey"]
+ sharedAccessSignature, hasSharedAccessSignature := connStrMap["SharedAccessSignature"]
+
+ if hasAccountName && hasAccountKey {
+ return ParsedConnectionString{
+ ServiceURL: serviceURL,
+ AccountName: accountName,
+ AccountKey: accountKey,
+ }, nil
+ } else if hasSharedAccessSignature {
+ return ParsedConnectionString{
+ ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature),
+ }, nil
+ } else {
+ return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature")
+ }
+}
+
+// SerializeBlobTags converts tags to generated.BlobTags
+func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags {
+ blobTagSet := make([]*generated.BlobTag, 0)
+ for key, val := range tagsMap {
+ newKey, newVal := key, val
+ blobTagSet = append(blobTagSet, &generated.BlobTag{Key: &newKey, Value: &newVal})
+ }
+ return &generated.BlobTags{BlobTagSet: blobTagSet}
+}
+
+func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
+ if len(tagsMap) == 0 {
+ return nil
+ }
+ tags := make([]string, 0)
+ for key, val := range tagsMap {
+ tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+ }
+ blobTagsString := strings.Join(tags, "&")
+ return &blobTagsString
+}
+
+func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+ if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+ return 0, nil
+ }
+
+ err := validateSeekableStreamAt0(body)
+ if err != nil {
+ return 0, err
+ }
+
+ count, err := body.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, errors.New("body stream must be seekable")
+ }
+
+ _, err = body.Seek(0, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+ return count, nil
+}
+
+// return an error if body is not a valid seekable stream at 0
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+ if body == nil { // nil bodies are "logically" seekable to 0
+ return nil
+ }
+ if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+ // Help detect programmer error
+ if err != nil {
+ return errors.New("body stream must be seekable")
+ }
+ return errors.New("body stream must be set to position 0")
+ }
+ return nil
+}
+
+func RangeToString(offset, count int64) string {
+ return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
+}
+
+type nopCloser struct {
+ io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+ return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+ return nopCloser{rs}
+}
+
+func GenerateLeaseID(leaseID *string) (*string, error) {
+ if leaseID == nil {
+ generatedUuid, err := uuid.New()
+ if err != nil {
+ return nil, err
+ }
+ leaseID = to.Ptr(generatedUuid.String())
+ }
+ return leaseID, nil
+}
+
+func GetClientOptions[T any](o *T) *T {
+ if o == nil {
+ return new(T)
+ }
+ return o
+}
+
+// IsIPEndpointStyle checks if URL's host is an IP, in which case the storage account endpoint will be composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// As in url.URL's Host property, host may be either host or host:port
+func IsIPEndpointStyle(host string) bool {
+ if host == "" {
+ return false
+ }
+ if h, _, err := net.SplitHostPort(host); err == nil {
+ host = h
+ }
+ // For IPv6, there could be a case where SplitHostPort fails because it cannot find a port.
+ // In this case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
+ if host[0] == '[' && host[len(host)-1] == ']' {
+ host = host[1 : len(host)-1]
+ }
+ return net.ParseIP(host) != nil
+}
+
+// ReadAtLeast reads from r into buf until it has read at least min bytes.
+// It returns the number of bytes copied and an error.
+// The EOF error is returned if no bytes were read or
+// EOF happened after reading fewer than min bytes.
+// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
+// On return, n >= min if and only if err == nil.
+// If r returns an error having read at least min bytes, the error is dropped.
+// This method is the same as io.ReadAtLeast except that it does not
+// return io.ErrUnexpectedEOF when fewer than min bytes are read.
+func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) {
+ if len(buf) < min {
+ return 0, io.ErrShortBuffer
+ }
+ for n < min && err == nil {
+ var nn int
+ nn, err = r.Read(buf[n:])
+ n += nn
+ }
+ if n >= min {
+ err = nil
+ }
+ return
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go
new file mode 100644
index 00000000000..4d26992d3eb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+const (
+ // EventUpload is used for logging events related to upload operation.
+ EventUpload = exported.EventUpload
+
+ // EventSubmitBatch is used for logging events related to submit blob batch operation.
+ EventSubmitBatch = exported.EventSubmitBatch
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md
new file mode 100644
index 00000000000..1b1a4b45d54
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/migrationguide.md
@@ -0,0 +1,76 @@
+# Guide to migrate from `azure-storage-blob-go` to `azblob`
+
+This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module.
+
+## Simplified API surface area
+
+The redesign of the `azblob` module separates clients into various sub-packages.
+In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package.
+This made it difficult to navigate the public surface area.
+
+## Clients
+
+In `azure-storage-blob-go` a client constructor always requires `url.URL` and `Pipeline` parameters.
+
+In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options.
+
+```go
+// new code
+client, err := azblob.NewClient("<storage account URL>", cred, nil)
+```
+
+## Authentication
+
+In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor.
+
+In `azblob`, you pass the required credential directly to the client constructor.
+
+```go
+// new code. cred is an AAD token credential created from the azidentity module
+client, err := azblob.NewClient("<storage account URL>", cred, nil)
+```
+
+The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`.
+This has been replaced by using Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme).
+
+Authentication with a shared key via `NewSharedKeyCredential` remains unchanged.
+
+In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication.
+
+In `azblob` you use the constructor `NewClientWithNoCredential()` instead.
+
+```go
+// new code
+client, err := azblob.NewClientWithNoCredential("<public blob URL or blob URL with SAS>", nil)
+```
+
+## Listing blobs/containers
+
+In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)).
+
+In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`.
+
+```go
+// new code
+pager := client.NewListBlobsFlatPager("my-container", nil)
+for pager.More() {
+ page, err := pager.NextPage(context.TODO())
+ // process results
+}
+```
+
+## Configuring the HTTP pipeline
+
+In `azure-storage-blob-go` you explicitly created an HTTP pipeline with configuration before creating a client.
+This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)).
+
+In `azblob` an HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type.
+ +```go +// new code +client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{ + ClientOptions: azcore.ClientOptions{ + // configure HTTP pipeline options here + }, +}) +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go new file mode 100644 index 00000000000..2896788e1f1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/models.go @@ -0,0 +1,69 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerOptions contains the optional parameters for the ContainerClient.Create method. +type CreateContainerOptions = service.CreateContainerOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = service.DeleteContainerOptions + +// DeleteBlobOptions contains the optional parameters for the Client.Delete method. +type DeleteBlobOptions = blob.DeleteOptions + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions = blob.DownloadStreamOptions + +// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method. +type ListBlobsFlatOptions = container.ListBlobsFlatOptions + +// ListBlobsInclude indicates what additional information the service should return with each blob. +type ListBlobsInclude = container.ListBlobsInclude + +// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation +type ListContainersOptions = service.ListContainersOptions + +// UploadBufferOptions provides set of configurations for UploadBuffer operation +type UploadBufferOptions = blockblob.UploadBufferOptions + +// UploadFileOptions provides set of configurations for UploadFile operation +type UploadFileOptions = blockblob.UploadFileOptions + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions = blockblob.UploadStreamOptions + +// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadBufferOptions = blob.DownloadBufferOptions + +// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions. +type DownloadFileOptions = blob.DownloadFileOptions + +// CPKInfo contains a group of parameters for client provided encryption key. +type CPKInfo = blob.CPKInfo + +// CPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type CPKScopeInfo = container.CPKScopeInfo + +// AccessConditions identifies blob-specific access conditions which you optionally set. +type AccessConditions = exported.BlobAccessConditions + +// ListContainersInclude indicates what additional information the service should return with each container. 
+type ListContainersInclude = service.ListContainersInclude + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy = blob.ObjectReplicationPolicy + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions = blob.RetryReaderOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go new file mode 100644 index 00000000000..30d0253af91 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -0,0 +1,454 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "context" + "io" + "net/http" + "net/url" + "os" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a client to an Azure Storage page blob; +type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] + +// NewClient creates an instance of Client with the specified values. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { + audience := base.GetAudience((*base.ClientOptions)(options)) + conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil +} + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a blob or with a shared access signature (SAS) token. +// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt? 
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
+	conOptions := shared.GetClientOptions(options)
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - blobURL - the URL of the blob e.g. https://.blob.core.windows.net/container/blob.txt
+// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+	azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - containerName - the name of the container within the storage account
+// - blobName - the name of the blob within the container
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (pb *Client) generated() *generated.PageBlobClient {
+	_, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+	return pageBlob
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (pb *Client) URL() string {
+	return pb.generated().Endpoint()
+}
+
+// BlobClient returns the embedded blob client for this page blob client.
+func (pb *Client) BlobClient() *blob.Client {
+	innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+	return (*blob.Client)(innerBlob)
+}
+
+func (pb *Client) sharedKey() *blob.SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
+}
+
+// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (pb *Client) WithSnapshot(snapshot string) (*Client, error) {
+	p, err := blob.ParseURL(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.Snapshot = snapshot
+
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
+}
+
+// WithVersionID creates a new Client object identical to the source but with the specified version ID.
+// Pass "" to remove the version returning a URL to the base blob.
+func (pb *Client) WithVersionID(versionID string) (*Client, error) {
+	p, err := blob.ParseURL(pb.URL())
+	if err != nil {
+		return nil, err
+	}
+	p.VersionID = versionID
+
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
+}
+
+// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) {
+	createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
+
+	resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders,
+		leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+	return resp, err
+}
+
+// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
+// This method returns an error if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
+func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, contentRange blob.HTTPRange, options *UploadPagesOptions) (UploadPagesResponse, error) {
+	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
+
+	if err != nil {
+		return UploadPagesResponse{}, err
+	}
+
+	uploadPagesOptions := &generated.PageBlobClientUploadPagesOptions{
+		Range: exported.FormatHTTPRange(contentRange),
+	}
+
+	leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
+
+	if options != nil && options.TransactionalValidation != nil {
+		body, err = options.TransactionalValidation.Apply(body, uploadPagesOptions)
+		if err != nil {
+			return UploadPagesResponse{}, err
+		}
+	}
+
+	resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
+		cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
+// The sourceOffset specifies the start offset of source data to copy from.
+// The destOffset specifies the start offset in the page blob at which data will be written.
+// The count must be a multiple of 512 bytes.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
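For orientation, here is a minimal end-to-end sketch of the constructors and upload path above. It is not part of the vendored code: the connection string, container, and blob names are placeholders, and `streaming.NopCloser` from `azcore/streaming` is assumed for wrapping the reader.

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func main() {
	// Placeholder connection string; NewClientFromConnectionString picks shared key
	// or anonymous auth depending on whether the string carries an account key.
	connStr := "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net"
	pb, err := pageblob.NewClientFromConnectionString(connStr, "my-container", "my-page-blob", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Page blobs are created at a fixed size, which must be a multiple of 512 (PageBytes).
	if _, err = pb.Create(context.TODO(), 2*pageblob.PageBytes, nil); err != nil {
		log.Fatal(err)
	}

	// Upload exactly one page. The offset and count must be 512-byte aligned, and the
	// stream must be at position 0, as enforced by ValidateSeekableStreamAt0AndGetCount.
	page := make([]byte, pageblob.PageBytes)
	if _, err = pb.UploadPages(context.TODO(), streaming.NopCloser(bytes.NewReader(page)),
		blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes}, nil); err != nil {
		log.Fatal(err)
	}
}
```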
+func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, + o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) { + + uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, + modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0, + shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, + sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + + return resp, err +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) { + clearOptions := &generated.PageBlobClientClearPagesOptions{ + Range: exported.FormatHTTPRange(rnge), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.generated().ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, + cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return resp, err +} + +// NewGetPageRangesPager returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] { + opts, leaseAccessConditions, modifiedAccessConditions := o.format() + + return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{ + More: func(page GetPageRangesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } else { + opts.Marker = page.NextMarker + req, err = pb.generated().GetPageRangesCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions) + } + if err != nil { + return GetPageRangesResponse{}, err + } + resp, err := pb.generated().InternalClient().Pipeline().Do(req) + if err != nil { + return GetPageRangesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GetPageRangesResponse{}, runtime.NewResponseError(resp) + } + return pb.generated().GetPageRangesHandleResponse(resp) + }, + }) +} + +// NewGetPageRangesDiffPager gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. 
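A sketch of consuming the pager returned above, assuming an existing `*pageblob.Client`; the response field names (`PageList.PageRange` with `*int64` bounds) follow the generated models.

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// listPageRanges prints every valid page range of a page blob. Sketch only:
// pb is assumed to be a *pageblob.Client created as shown earlier.
func listPageRanges(ctx context.Context, pb *pageblob.Client) error {
	pager := pb.NewGetPageRangesPager(nil)
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, pr := range resp.PageList.PageRange {
			fmt.Printf("valid bytes: %d-%d\n", *pr.Start, *pr.End)
		}
	}
	return nil
}
```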
+func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] {
+	opts, leaseAccessConditions, modifiedAccessConditions := o.format()
+
+	return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{
+		More: func(page GetPageRangesDiffResponse) bool {
+			return page.NextMarker != nil && len(*page.NextMarker) > 0
+		},
+		Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions)
+			} else {
+				opts.Marker = page.NextMarker
+				req, err = pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, leaseAccessConditions, modifiedAccessConditions)
+			}
+			if err != nil {
+				return GetPageRangesDiffResponse{}, err
+			}
+			resp, err := pb.generated().InternalClient().Pipeline().Do(req)
+			if err != nil {
+				return GetPageRangesDiffResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
+			}
+			return pb.generated().GetPageRangesDiffHandleResponse(resp)
+		},
+	})
+}
+
+// Resize resizes the page blob to the specified size (which must be a multiple of 512).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
+func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) {
+	resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
+
+	resp, err := pb.generated().Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// UpdateSequenceNumber sets the page blob's sequence number.
+func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) {
+	actionType, updateOptions, lac, mac := options.format()
+	resp, err := pb.generated().UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac)
+
+	return resp, err
+}
+
+// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
+// The snapshot is copied such that only the differential changes since the previously copied snapshot are transferred to the destination.
+// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
+// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
+func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) {
+	copySourceURL, err := url.Parse(copySource)
+	if err != nil {
+		return CopyIncrementalResponse{}, err
+	}
+
+	queryParams := copySourceURL.Query()
+	queryParams.Set("snapshot", prevSnapshot)
+	copySourceURL.RawQuery = queryParams.Encode()
+
+	pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
+	resp, err := pb.generated().CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
+
+	return resp, err
+}
+
+// Redeclared APIs
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. +func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) { + return pb.BlobClient().Delete(ctx, o) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) { + return pb.BlobClient().Undelete(ctx, o) +} + +// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) { + return pb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options) +} + +// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) { + return pb.BlobClient().DeleteImmutabilityPolicy(ctx, options) +} + +// SetLegalHold operation enables users to set legal hold on a blob. +// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview +func (pb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) { + return pb.BlobClient().SetLegalHold(ctx, legalHold, options) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { + return pb.BlobClient().SetTier(ctx, tier, o) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) { + return pb.BlobClient().GetProperties(ctx, o) +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return pb.BlobClient().GetAccountInfo(ctx, o) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
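As a quick illustration of these redeclared blob APIs, a hedged sketch that reads back two properties; `pb` is assumed to be an existing `*pageblob.Client`, and the printed fields are pointers in the generated models.

```go
import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// inspect reads back a few of the properties surfaced by GetProperties.
// Sketch only; pb is assumed to be an existing *pageblob.Client.
func inspect(ctx context.Context, pb *pageblob.Client) {
	props, err := pb.GetProperties(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Field names follow the generated blob models; both values are pointers.
	fmt.Println("size:", *props.ContentLength, "sequence number:", *props.BlobSequenceNumber)
}
```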
+func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) { + return pb.BlobClient().SetMetadata(ctx, metadata, o) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. +func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) { + return pb.BlobClient().CreateSnapshot(ctx, o) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) { + return pb.BlobClient().StartCopyFromURL(ctx, copySource, o) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) { + return pb.BlobClient().AbortCopyFromURL(ctx, copyID, o) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) { + return pb.BlobClient().SetTags(ctx, tags, o) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) { + return pb.BlobClient().GetTags(ctx, o) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { + return pb.BlobClient().CopyFromURL(ctx, copySource, o) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return pb.BlobClient().GetSASURL(permissions, expiry, o) +} + +// Concurrent Download Functions ----------------------------------------------------------------------------------------- + +// DownloadStream reads a range of bytes from a blob. 
The response also includes the blob's properties and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
+	return pb.BlobClient().DownloadStream(ctx, o)
+}
+
+// DownloadBuffer downloads an Azure blob to a buffer in parallel.
+func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
+	return pb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
+}
+
+// DownloadFile downloads an Azure blob to a local file.
+// The file is truncated if its size doesn't match the size of the downloaded data.
+func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
+	return pb.BlobClient().DownloadFile(ctx, file, o)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go
new file mode 100644
index 00000000000..096a7910aa9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/constants.go
@@ -0,0 +1,65 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package pageblob
+
+import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+
+const (
+	// PageBytes indicates the number of bytes in a page (512).
+	PageBytes = 512
+)
+
+// CopyStatusType defines values for CopyStatusType
+type CopyStatusType = generated.CopyStatusType
+
+const (
+	CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
+	CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
+	CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
+	CopyStatusTypeFailed  CopyStatusType = generated.CopyStatusTypeFailed
+)
+
+// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
+func PossibleCopyStatusTypeValues() []CopyStatusType {
+	return generated.PossibleCopyStatusTypeValues()
+}
+
+// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier.
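A short sketch of the file download path above, assuming an existing `*pageblob.Client`; the local file name is a placeholder.

```go
import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// download writes the whole blob to a local file. Sketch only; pb is assumed
// to be an existing *pageblob.Client and the path is a placeholder.
func download(ctx context.Context, pb *pageblob.Client) {
	f, err := os.Create("page-blob.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	n, err := pb.DownloadFile(ctx, f, nil) // pass options to tune concurrency/block size
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```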
+type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10 + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15 + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20 + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30 + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4 + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40 + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50 + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6 + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60 + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70 + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80 +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return generated.PossiblePremiumPageBlobAccessTierValues() +} + +// SequenceNumberActionType defines values for SequenceNumberActionType. +type SequenceNumberActionType = generated.SequenceNumberActionType + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = generated.SequenceNumberActionTypeMax + SequenceNumberActionTypeUpdate SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate + SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return generated.PossibleSequenceNumberActionTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go new file mode 100644 index 00000000000..39aef20ff5d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -0,0 +1,330 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// Type Declarations --------------------------------------------------------------------- + +// PageList - the list of pages. +type PageList = generated.PageList + +// ClearRange defines a range of pages. +type ClearRange = generated.ClearRange + +// PageRange defines a range of pages. +type PageRange = generated.PageRange + +// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method. 
+type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions + +// Request Model Declaration ------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + SequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + + HTTPHeaders *blob.HTTPHeaders + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + AccessConditions *blob.AccessConditions + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool +} + +func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &generated.PageBlobClientCreateOptions{ + BlobSequenceNumber: o.SequenceNumber, + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + Metadata: o.Metadata, + Tier: o.Tier, + } + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadPagesOptions contains the optional parameters for the Client.UploadPages method. +type UploadPagesOptions struct { + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). 
+ TransactionalValidation blob.TransferValidationType + + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + AccessConditions *blob.AccessConditions +} + +func (o *UploadPagesOptions) format() (*generated.LeaseAccessConditions, + *generated.CPKInfo, *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method. +type UploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. + SourceContentValidation blob.SourceContentValidationType + + CPKInfo *blob.CPKInfo + + CPKScopeInfo *blob.CPKScopeInfo + + SequenceNumberAccessConditions *SequenceNumberAccessConditions + + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions + + AccessConditions *blob.AccessConditions +} + +func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, + *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &generated.PageBlobClientUploadPagesFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + } + + if o.SourceContentValidation != nil { + o.SourceContentValidation.Apply(options) + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return options, o.CPKInfo, o.CPKScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ClearPagesOptions contains the optional parameters for the Client.ClearPages operation +type ClearPagesOptions struct { + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + AccessConditions *blob.AccessConditions +} + +func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CPKInfo, + *generated.CPKScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method. 
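Tying the options above to the client methods, a minimal sketch that clears the first page; `pb` is assumed to be an existing `*pageblob.Client`, and passing `nil` options simply sends no access conditions.

```go
import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// clearFirstPage zeroes the first 512-byte page. Sketch only; pb is assumed
// to be an existing *pageblob.Client.
func clearFirstPage(ctx context.Context, pb *pageblob.Client) {
	if _, err := pb.ClearPages(ctx, blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes}, nil); err != nil {
		log.Fatal(err)
	}
}
```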
+type GetPageRangesOptions struct {
+	Marker *string
+	// Specifies the maximum number of page ranges to return. If the request does not specify MaxResults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by MaxResults, or than the default of 5000.
+	MaxResults *int32
+	// Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+	// of the target blob. The response will only contain pages that were changed
+	// between the target blob and its previous snapshot.
+	PrevSnapshotURL *string
+	// Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response
+	// will contain only pages that were changed between target blob and previous
+	// snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+	// specified by PrevSnapshot is the older of the two. Note that incremental
+	// snapshots are currently supported only for blobs created on or after January 1, 2016.
+	PrevSnapshot *string
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range blob.HTTPRange
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	AccessConditions *blob.AccessConditions
+}
+
+func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &generated.PageBlobClientGetPageRangesOptions{
+		Marker:     o.Marker,
+		Maxresults: o.MaxResults,
+		Range:      exported.FormatHTTPRange(o.Range),
+		Snapshot:   o.Snapshot,
+	}, leaseAccessConditions, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method.
+type GetPageRangesDiffOptions struct {
+	// A string value that identifies the portion of the results to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all results remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of page ranges to return. If the request does not specify MaxResults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by MaxResults, or than the default of 5000.
+	MaxResults *int32
+	// Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+	// of the target blob. The response will only contain pages that were changed
+	// between the target blob and its previous snapshot.
+	PrevSnapshotURL *string
+	// Optional in version 2015-07-08 and newer. The PrevSnapshot parameter is a DateTime value that specifies that the response
+	// will contain only pages that were changed between target blob and previous
+	// snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+	// specified by PrevSnapshot is the older of the two. Note that incremental
+	// snapshots are currently supported only for blobs created on or after January 1, 2016.
+	PrevSnapshot *string
+	// Range specifies a range of bytes. The default value is all bytes.
+	Range blob.HTTPRange
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	AccessConditions *blob.AccessConditions
+}
+
+func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &generated.PageBlobClientGetPageRangesDiffOptions{
+		Marker:          o.Marker,
+		Maxresults:      o.MaxResults,
+		PrevSnapshotURL: o.PrevSnapshotURL,
+		Prevsnapshot:    o.PrevSnapshot,
+		Range:           exported.FormatHTTPRange(o.Range),
+		Snapshot:        o.Snapshot,
+	}, leaseAccessConditions, modifiedAccessConditions
+
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ResizeOptions contains the optional parameters for the Client.Resize method.
+type ResizeOptions struct {
+	CPKInfo          *blob.CPKInfo
+	CPKScopeInfo     *blob.CPKScopeInfo
+	AccessConditions *blob.AccessConditions
+}
+
+func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions,
+	*generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil, nil, nil
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return nil, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method.
+type UpdateSequenceNumberOptions struct { + ActionType *SequenceNumberActionType + + SequenceNumber *int64 + + AccessConditions *blob.AccessConditions +} + +func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions, + *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &generated.PageBlobClientUpdateSequenceNumberOptions{ + BlobSequenceNumber: o.SequenceNumber, + } + + if *o.ActionType == SequenceNumberActionTypeIncrement { + options.BlobSequenceNumber = nil + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyIncrementalOptions contains the optional parameters for the Client.StartCopyIncremental method. +type CopyIncrementalOptions struct { + ModifiedAccessConditions *blob.ModifiedAccessConditions +} + +func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go new file mode 100644 index 00000000000..876efbab1d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/responses.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package pageblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.PageBlobClientCreateResponse + +// UploadPagesResponse contains the response from method Client.UploadPages. +type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse + +// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL. +type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse + +// ClearPagesResponse contains the response from method Client.ClearPages. +type ClearPagesResponse = generated.PageBlobClientClearPagesResponse + +// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager. +type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse + +// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager. +type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.PageBlobClientResizeResponse + +// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber. +type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse + +// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental. 
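One detail worth showing: `format()` above dereferences `o.ActionType`, so callers must always set it. A sketch, assuming an existing `*pageblob.Client`:

```go
import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// bumpSequenceNumber increments a page blob's sequence number. Sketch only; pb is
// assumed to be an existing *pageblob.Client. ActionType must be non-nil here,
// since format() dereferences it; for Increment the explicit number is ignored.
func bumpSequenceNumber(ctx context.Context, pb *pageblob.Client) {
	action := pageblob.SequenceNumberActionTypeIncrement
	_, err := pb.UpdateSequenceNumber(ctx, &pageblob.UpdateSequenceNumberOptions{
		ActionType: &action,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```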
+type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go new file mode 100644 index 00000000000..86b05d098f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/responses.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" +) + +// CreateContainerResponse contains the response from method container.Client.Create. +type CreateContainerResponse = service.CreateContainerResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = service.DeleteContainerResponse + +// DeleteBlobResponse contains the response from method blob.Client.Delete. +type DeleteBlobResponse = blob.DeleteResponse + +// UploadResponse contains the response from method blockblob.Client.CommitBlockList. +type UploadResponse = blockblob.CommitBlockListResponse + +// DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. +type DownloadStreamResponse = blob.DownloadStreamResponse + +// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment. +type ListBlobsFlatResponse = container.ListBlobsFlatResponse + +// ListContainersResponse contains the response from method service.Client.ListContainersSegment. +type ListContainersResponse = service.ListContainersResponse + +// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadBufferResponse = blockblob.UploadBufferResponse + +// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile. +type UploadFileResponse = blockblob.UploadFileResponse + +// UploadStreamResponse contains the response from method Client.CommitBlockList. +type UploadStreamResponse = blockblob.CommitBlockListResponse + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse = generated.ListContainersSegmentResponse + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go new file mode 100644 index 00000000000..4069bb13202 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -0,0 +1,229 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// UserDelegationCredential contains an account's name and its user delegation key. +type UserDelegationCredential = exported.UserDelegationCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. +func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + "b", // blob service + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + v.EncryptionScope, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Account-specific SAS parameters + services: "b", // will always be "b" + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. 
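A sketch of signing account SAS values with the permission and resource-type helpers defined below; the account name and key are placeholders, and the credential constructor is taken from the top-level `azblob` package.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	// Hypothetical account name/key; the credential type is shared with the azblob package.
	cred, err := azblob.NewSharedKeyCredential("myaccount", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}
	qp, err := sas.AccountSignatureValues{
		Protocol:      sas.ProtocolHTTPS,
		ExpiryTime:    time.Now().Add(48 * time.Hour),
		Permissions:   (&sas.AccountPermissions{Read: true, List: true}).String(),
		ResourceTypes: (&sas.AccountResourceTypes{Container: true, Object: true}).String(),
	}.SignWithSharedKey(cred)
	if err != nil {
		log.Fatal(err)
	}
	// Encode renders the signed query string (defined later in query_params.go).
	fmt.Println("https://myaccount.blob.core.windows.net/?" + qp.Encode())
}
```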
+type AccountPermissions struct { + Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSignatureValues' Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.PermanentDelete { + buffer.WriteRune('y') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.SetImmutabilityPolicy { + buffer.WriteRune('i') + } + return buffer.String() +} + +// Parse initializes the AccountPermissions' fields from a string. +func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. +func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go new file mode 100644 index 00000000000..20f9875a965 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -0,0 +1,452 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package sas
+
+import (
+ "errors"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+)
+
+// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const (
+ TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601
+)
+
+var (
+ // Version is the default version encoded in the SAS token.
+ Version = generated.ServiceVersion
+)
+
+// timeFormats lists the accepted ISO 8601 time formats.
+// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"}
+
+// Protocol indicates the protocol scheme(s) a SAS may be used with (https, or https and http).
+type Protocol string
+
+const (
+ // ProtocolHTTPS can be specified for a SAS protocol.
+ ProtocolHTTPS Protocol = "https"
+
+ // ProtocolHTTPSandHTTP can be specified for a SAS protocol.
+ ProtocolHTTPSandHTTP Protocol = "https,http"
+)
+
+// formatTimesForSigning converts the start, expiry, and snapshot times to strings suitable
+// for signing. Each returned value is "" if the corresponding time IsZero().
+func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+ ss := ""
+ if !startTime.IsZero() {
+ ss = formatTimeWithDefaultFormat(&startTime)
+ }
+ se := ""
+ if !expiryTime.IsZero() {
+ se = formatTimeWithDefaultFormat(&expiryTime)
+ }
+ sh := ""
+ if !snapshotTime.IsZero() {
+ sh = snapshotTime.Format(exported.SnapshotTimeFormat)
+ }
+ return ss, se, sh
+}
+
+// formatTimeWithDefaultFormat formats a time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+ return formatTime(t, TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime formats a time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+ if format != "" {
+ return t.Format(format)
+ }
+ return t.Format(TimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string against each accepted format.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+ for _, sasTimeFormat := range timeFormats {
+ t, err = time.Parse(sasTimeFormat, val)
+ if err == nil {
+ timeFormat = sasTimeFormat
+ break
+ }
+ }
+
+ if err != nil {
+ err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+ }
+
+ return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+ Start net.IP // Not specified if length = 0
+ End net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+ if len(ipr.Start) == 0 {
+ return ""
+ }
+ start := ipr.Start.String()
+ if len(ipr.End) == 0 {
+ return start
+ }
+ return start + "-" + ipr.End.String()
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewQueryParameters(), and
+// encode the components back into a URL's query string by calling Encode().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. +// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOID string `param:"skoid"` + signedTID string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + authorizedObjectID string `param:"saoid"` + unauthorizedObjectID string `param:"suoid"` + correlationID string `param:"scid"` + encryptionScope string `param:"ses"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// AuthorizedObjectID returns authorizedObjectID. +func (p *QueryParameters) AuthorizedObjectID() string { + return p.authorizedObjectID +} + +// UnauthorizedObjectID returns unauthorizedObjectID. +func (p *QueryParameters) UnauthorizedObjectID() string { + return p.unauthorizedObjectID +} + +// SignedCorrelationID returns signedCorrelationID. +func (p *QueryParameters) SignedCorrelationID() string { + return p.correlationID +} + +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + +// SignedOID returns signedOID. +func (p *QueryParameters) SignedOID() string { + return p.signedOID +} + +// SignedTID returns signedTID. +func (p *QueryParameters) SignedTID() string { + return p.signedTID +} + +// SignedStart returns signedStart. +func (p *QueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry. +func (p *QueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService. +func (p *QueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion. +func (p *QueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime. +func (p *QueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version. +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services. +func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes. +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol. +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime. +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime. +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange. 
+func (p *QueryParameters) IPRange() IPRange {
+ return p.ipRange
+}
+
+// Identifier returns identifier.
+func (p *QueryParameters) Identifier() string {
+ return p.identifier
+}
+
+// Resource returns resource.
+func (p *QueryParameters) Resource() string {
+ return p.resource
+}
+
+// Permissions returns permissions.
+func (p *QueryParameters) Permissions() string {
+ return p.permissions
+}
+
+// Signature returns signature.
+func (p *QueryParameters) Signature() string {
+ return p.signature
+}
+
+// CacheControl returns cacheControl.
+func (p *QueryParameters) CacheControl() string {
+ return p.cacheControl
+}
+
+// ContentDisposition returns contentDisposition.
+func (p *QueryParameters) ContentDisposition() string {
+ return p.contentDisposition
+}
+
+// ContentEncoding returns contentEncoding.
+func (p *QueryParameters) ContentEncoding() string {
+ return p.contentEncoding
+}
+
+// ContentLanguage returns contentLanguage.
+func (p *QueryParameters) ContentLanguage() string {
+ return p.contentLanguage
+}
+
+// ContentType returns contentType.
+func (p *QueryParameters) ContentType() string {
+ return p.contentType
+}
+
+// SignedDirectoryDepth returns signedDirectoryDepth.
+func (p *QueryParameters) SignedDirectoryDepth() string {
+ return p.signedDirectoryDepth
+}
+
+// Encode encodes the SAS query parameters into URL encoded form sorted by key.
+func (p *QueryParameters) Encode() string {
+ v := url.Values{}
+
+ if p.version != "" {
+ v.Add("sv", p.version)
+ }
+ if p.services != "" {
+ v.Add("ss", p.services)
+ }
+ if p.resourceTypes != "" {
+ v.Add("srt", p.resourceTypes)
+ }
+ if p.protocol != "" {
+ v.Add("spr", string(p.protocol))
+ }
+ if !p.startTime.IsZero() {
+ v.Add("st", formatTime(&(p.startTime), p.stTimeFormat))
+ }
+ if !p.expiryTime.IsZero() {
+ v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat))
+ }
+ if len(p.ipRange.Start) > 0 {
+ v.Add("sip", p.ipRange.String())
+ }
+ if p.identifier != "" {
+ v.Add("si", p.identifier)
+ }
+ if p.resource != "" {
+ v.Add("sr", p.resource)
+ }
+ if p.permissions != "" {
+ v.Add("sp", p.permissions)
+ }
+ if p.signedOID != "" {
+ v.Add("skoid", p.signedOID)
+ v.Add("sktid", p.signedTID)
+ v.Add("skt", p.signedStart.Format(TimeFormat))
+ v.Add("ske", p.signedExpiry.Format(TimeFormat))
+ v.Add("sks", p.signedService)
+ v.Add("skv", p.signedVersion)
+ }
+ if p.signature != "" {
+ v.Add("sig", p.signature)
+ }
+ if p.cacheControl != "" {
+ v.Add("rscc", p.cacheControl)
+ }
+ if p.contentDisposition != "" {
+ v.Add("rscd", p.contentDisposition)
+ }
+ if p.contentEncoding != "" {
+ v.Add("rsce", p.contentEncoding)
+ }
+ if p.contentLanguage != "" {
+ v.Add("rscl", p.contentLanguage)
+ }
+ if p.contentType != "" {
+ v.Add("rsct", p.contentType)
+ }
+ if p.signedDirectoryDepth != "" {
+ v.Add("sdd", p.signedDirectoryDepth)
+ }
+ if p.authorizedObjectID != "" {
+ v.Add("saoid", p.authorizedObjectID)
+ }
+ if p.unauthorizedObjectID != "" {
+ v.Add("suoid", p.unauthorizedObjectID)
+ }
+ if p.correlationID != "" {
+ v.Add("scid", p.correlationID)
+ }
+ if p.encryptionScope != "" {
+ v.Add("ses", p.encryptionScope)
+ }
+
+ return v.Encode()
+}
+
+// NewQueryParameters creates and initializes a QueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
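+//
+// A hedged sketch of pulling SAS parameters out of a URL; the query values shown are
+// placeholders, not a real token:
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/c?sv=2021-08-06&sp=rl&sig=abc")
+//	vals := u.Query()
+//	qp := NewQueryParameters(vals, true) // vals no longer holds the recognized SAS keys
+//	_ = qp.Permissions()                 // "rl"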
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(TimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(TimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.authorizedObjectID = val + case "suoid": + p.unauthorizedObjectID = val + case "scid": + p.correlationID = val + case "ses": + p.encryptionScope = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go new file mode 100644 index 00000000000..45f730847d2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -0,0 +1,472 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
+// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// For more information on creating user delegation sas, see https://docs.microsoft.com/rest/api/storageservices/create-user-delegation-sas +type BlobSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // Use "" to create a Container SAS + Directory string // Not nil for a directory SAS (ie sr=d) + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + BlobVersion string // sr=bv + AuthorizedObjectID string // saoid + UnauthorizedObjectID string // suoid + CorrelationID string // scid + EncryptionScope string `param:"ses"` +} + +func getDirectoryDepth(path string) string { + if path == "" { + return "" + } + return fmt.Sprint(strings.Count(path, "/") + 1) +} + +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") + } + + // Parse the resource + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + signedIdentifier := v.Identifier + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.EncryptionScope, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + 
expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters. +func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) { + if userDelegationCredential == nil { + return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") + } + + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") + } + + // Parse the resource + resource := "c" + if !v.SnapshotTime.IsZero() { + resource = "bs" + } else if v.BlobVersion != "" { + resource = "bv" + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + } else if v.BlobName == "" { + // do nothing + } else { + resource = "b" + } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + udk := exported.GetUDKParams(userDelegationCredential) + + udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{}) + + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory), + *udk.SignedOID, + *udk.SignedTID, + udkStart, + udkExpiry, + *udk.SignedService, + *udk.SignedVersion, + v.AuthorizedObjectID, + v.UnauthorizedObjectID, + v.CorrelationID, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.EncryptionScope, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: 
v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + authorizedObjectID: v.AuthorizedObjectID, + unauthorizedObjectID: v.UnauthorizedObjectID, + correlationID: v.CorrelationID, + // Calculated SAS signature + signature: signature, + } + + //User delegation SAS specific parameters + p.signedOID = *udk.SignedOID + p.signedTID = *udk.SignedTID + p.signedStart = *udk.SignedStart + p.signedExpiry = *udk.SignedExpiry + p.signedService = *udk.SignedService + p.signedVersion = *udk.SignedVersion + + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool + Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSignatureValues' Permissions field. +func (p *ContainerPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.FilterByTags { + b.WriteRune('f') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes ContainerPermissions' fields from a string. 
+func parseContainerPermissions(s string) (ContainerPermissions, error) { + p := ContainerPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} + +// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. +type BlobPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSignatureValues' Permissions field. +func (p *BlobPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.PermanentDelete { + b.WriteRune('y') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + if p.SetImmutabilityPolicy { + b.WriteRune('i') + } + return b.String() +} + +// Parse initializes BlobPermissions' fields from a string. +func parseBlobPermissions(s string) (BlobPermissions, error) { + p := BlobPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go similarity index 65% rename from vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go index ff055865c8a..57fe053f07a 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -1,72 +1,64 @@ -package azblob +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas import ( - "net" "net/url" "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( - snapshot = "snapshot" - versionId = "versionid" - SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" + snapshot = "snapshot" + versionId = "versionid" ) -// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an -// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL(). +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/containername" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. // NOTE: Changing any SAS-related field requires computing a new SAS signature. -type BlobURLParts struct { +type URLParts struct { Scheme string // Ex: "https://" Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80" IPEndpointStyleInfo IPEndpointStyleInfo ContainerName string // "" if no container BlobName string // "" if no blob Snapshot string // "" if not a snapshot - SAS SASQueryParameters + SAS QueryParameters UnparsedParams string VersionID string // "" if not versioning enabled } -// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. -// Ex: "https://10.132.141.33/accountname/containername" -type IPEndpointStyleInfo struct { - AccountName string // "" if not using IP endpoint style -} - -// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: -// http(s)://IP(:port)/storageaccount/container/... -// As url's Host property, host could be both host or host:port -func isIPEndpointStyle(host string) bool { - if host == "" { - return false +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. +func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err } - if h, _, err := net.SplitHostPort(host); err == nil { - host = h - } - // For IPv6, there could be case where SplitHostPort fails for cannot finding port. - // In this case, eliminate the '[' and ']' in the URL. - // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 - if host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - return net.ParseIP(host) != nil -} -// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. 
-func NewBlobURLParts(u url.URL) BlobURLParts { - up := BlobURLParts{ - Scheme: u.Scheme, - Host: u.Host, + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, } // Find the container & blob names (if any) - if u.Path != "" { - path := u.Path + if uri.Path != "" { + path := uri.Path if path[0] == '/' { path = path[1:] // If path starts with a slash, remove it } - if isIPEndpointStyle(up.Host) { + if shared.IsIPEndpointStyle(up.Host) { if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob up.IPEndpointStyleInfo.AccountName = path path = "" // No ContainerName present in the URL so path should be empty @@ -86,43 +78,33 @@ func NewBlobURLParts(u url.URL) BlobURLParts { } // Convert the query parameters to a case-sensitive map & trim whitespace - paramsMap := u.Query() + paramsMap := uri.Query() - up.Snapshot = "" // Assume no snapshot - up.VersionID = "" // Assume no versionID + up.Snapshot = "" // Assume no snapshot if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { up.Snapshot = snapshotStr[0] // If we recognized the query parameter, remove it from the map delete(paramsMap, snapshot) } + up.VersionID = "" // Assume no versionID if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { up.VersionID = versionIDs[0] // If we recognized the query parameter, remove it from the map delete(paramsMap, versionId) // delete "versionid" from paramsMap delete(paramsMap, "versionId") // delete "versionId" from paramsMap } - up.SAS = newSASQueryParameters(paramsMap, true) - up.UnparsedParams = paramsMap.Encode() - return up -} -type caseInsensitiveValues url.Values // map[string][]string -func (values caseInsensitiveValues) Get(key string) ([]string, bool) { - key = strings.ToLower(key) - for k, v := range values { - if strings.ToLower(k) == key { - return v, true - } - } - return []string{}, false + up.SAS = NewQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil } -// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery // field contains the SAS, snapshot, and unparsed query parameters. 
-func (up BlobURLParts) URL() url.URL { +func (up URLParts) String() string { path := "" - if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { path += "/" + up.IPEndpointStyleInfo.AccountName } // Concatenate container & blob names (if they exist) @@ -136,26 +118,25 @@ func (up BlobURLParts) URL() url.URL { rawQuery := up.UnparsedParams //If no snapshot is initially provided, fill it in from the SAS query properties to help the user - if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { - up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) + if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { + up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } - // Concatenate blob snapshot query parameter (if it exists) - if up.Snapshot != "" { + // Concatenate blob version id query parameter (if it exists) + if up.VersionID != "" { if len(rawQuery) > 0 { rawQuery += "&" } - rawQuery += snapshot + "=" + up.Snapshot + rawQuery += versionId + "=" + up.VersionID } - // Concatenate blob version id query parameter (if it exists) - if up.VersionID != "" { + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { if len(rawQuery) > 0 { rawQuery += "&" } - rawQuery += versionId + "=" + up.VersionID + rawQuery += snapshot + "=" + up.Snapshot } - sas := up.SAS.Encode() if sas != "" { if len(rawQuery) > 0 { @@ -169,5 +150,17 @@ func (up BlobURLParts) URL() url.URL { Path: path, RawQuery: rawQuery, } - return u + return u.String() +} + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go new file mode 100644 index 00000000000..924fd1081fe --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. 
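+//
+// An illustrative sketch; "svcClient" is an assumed *Client created with shared key
+// auth, which service-level batching requires:
+//
+//	bb, err := svcClient.NewBatchBuilder()
+//	if err == nil {
+//		_ = bb.Delete("my-container", "blob-1", nil)
+//		_ = bb.Delete("my-container", "blob-2", nil)
+//		resp, err := svcClient.SubmitBatch(context.TODO(), bb, nil)
+//		_, _ = resp, err
+//	}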
+type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. +func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. +func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go new file mode 100644 index 00000000000..cf39c3d579e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -0,0 +1,377 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+
+package service
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+)
+
+// ClientOptions contains the optional parameters when creating a Client.
+type ClientOptions base.ClientOptions
+
+// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
+type Client base.Client[generated.ServiceClient]
+
+// NewClient creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+ audience := base.GetAudience((*base.ClientOptions)(options))
+ conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
+ plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+ azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
+ return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithNoCredential creates an instance of Client with the specified values.
+// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<SAS token>
+// - options - client options; pass nil to accept the default values
+func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
+ conOptions := shared.GetClientOptions(options)
+
+ azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
+ return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
+// - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
+ authPolicy := exported.NewSharedKeyCredPolicy(cred)
+ conOptions := shared.GetClientOptions(options)
+ plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
+
+ azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil
+}
+
+// NewClientFromConnectionString creates an instance of Client with the specified values.
+// - connectionString - a connection string for the desired storage account
+// - options - client options; pass nil to accept the default values
+func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
+ parsed, err := shared.ParseConnectionString(connectionString)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsed.AccountKey != "" && parsed.AccountName != "" {
+ credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+ if err != nil {
+ return nil, err
+ }
+ return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+ }
+
+ return NewClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
+// OAuth is required for this call, as well as any role that can delegate access to the storage account.
+func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) {
+ url, err := blob.ParseURL(s.URL())
+ if err != nil {
+ return nil, err
+ }
+
+ getUserDelegationKeyOptions := o.format()
+ udk, err := s.generated().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil
+}
+
+func (s *Client) generated() *generated.ServiceClient {
+ return base.InnerClient((*base.Client[generated.ServiceClient])(s))
+}
+
+func (s *Client) sharedKey() *SharedKeyCredential {
+ return base.SharedKey((*base.Client[generated.ServiceClient])(s))
+}
+
+func (s *Client) credential() any {
+ return base.Credential((*base.Client[generated.ServiceClient])(s))
+}
+
+// helper method to return the generated.BlobClient which is used for creating the sub-requests
+func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
+ return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
+func (s *Client) getClientOptions() *base.ClientOptions {
+ return base.GetClientOptions((*base.Client[generated.ServiceClient])(s))
+}
+
+// URL returns the URL endpoint used by the Client object.
+func (s *Client) URL() string {
+ return s.generated().Endpoint()
+}
+
+// NewContainerClient creates a new container.Client object by concatenating containerName to the end of
+// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
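+//
+// For example (illustrative; "svcClient" is an assumed existing *Client):
+//
+//	containerClient := svcClient.NewContainerClient("my-container")
+//	_ = containerClient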
+func (s *Client) NewContainerClient(containerName string) *container.Client {
+ containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
+ return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions()))
+}
+
+// CreateContainer is a lifecycle method that creates a new container under the specified account.
+// If a container with the same name already exists, a ResourceExistsError will be raised.
+// This method returns a client with which to interact with the newly created container.
+func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) {
+ containerClient := s.NewContainerClient(containerName)
+ containerCreateResp, err := containerClient.Create(ctx, options)
+ return containerCreateResp, err
+}
+
+// DeleteContainer is a lifecycle method that marks the specified container for deletion.
+// The container and any blobs contained within it are later deleted during garbage collection.
+// If the container is not found, a ResourceNotFoundError will be raised.
+func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) {
+ containerClient := s.NewContainerClient(containerName)
+ containerDeleteResp, err := containerClient.Delete(ctx, options)
+ return containerDeleteResp, err
+}
+
+// RestoreContainer restores a soft-deleted container.
+// The operation will only succeed if used within the specified number of days set in the delete retention policy.
+func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) {
+ containerClient := s.NewContainerClient(deletedContainerName)
+ containerRestoreResp, err := containerClient.Restore(ctx, deletedContainerVersion, options)
+ return containerRestoreResp, err
+}
+
+// GetAccountInfo provides account-level information.
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
+func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
+ getAccountInfoOptions := o.format()
+ resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions)
+ return resp, err
+}
+
+// NewListContainersPager operation returns a pager of the containers under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
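+//
+// A hedged usage sketch; to.Ptr comes from the azcore "to" helper package (not
+// imported in this file) and the prefix value is illustrative:
+//
+//	pager := svcClient.NewListContainersPager(&ListContainersOptions{Prefix: to.Ptr("logs-")})
+//	for pager.More() {
+//		page, err := pager.NextPage(context.TODO())
+//		if err != nil {
+//			break
+//		}
+//		for _, c := range page.ContainerItems {
+//			fmt.Println(*c.Name)
+//		}
+//	}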
+func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] { + listOptions := generated.ServiceClientListContainersSegmentOptions{} + if o != nil { + if o.Include.Deleted { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted) + } + if o.Include.Metadata { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) + } + if o.Include.System { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) + } + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{ + More: func(page ListContainersResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListContainersResponse{}, err + } + resp, err := s.generated().InternalClient().Pipeline().Do(req) + if err != nil { + return ListContainersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListContainersResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListContainersSegmentHandleResponse(resp) + }, + }) +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { + getPropertiesOptions := o.format() + resp, err := s.generated().GetProperties(ctx, getPropertiesOptions) + return resp, err +} + +// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. +// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. +func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) { + properties, setPropertiesOptions := o.format() + resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions) + return resp, err +} + +// GetStatistics Retrieves statistics related to replication for the Blob service. +// It is only available when read-access geo-redundant replication is enabled for the storage account. +// With geo-redundant replication, Azure Storage maintains your data durable +// in two locations. In both locations, Azure Storage constantly maintains +// multiple healthy replicas of your data. The location where you read, +// create, update, or delete data is the primary storage account location. +// The primary location exists in the region you choose at the time you +// create an account via the Azure Management Azure classic portal, for +// example, North Central US. The location to which your data is replicated +// is the secondary location. The secondary location is automatically +// determined based on the location of the primary; it is in a second data +// center that resides in the same region as the primary location. 
Read-only +// access is available from the secondary location, if read-access geo-redundant +// replication is enabled for your storage account. +func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) { + getStatisticsOptions := o.format() + resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions) + + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", bloberror.MissingSharedKeyCredential + } + st := o.format() + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Permissions: permissions.String(), + ResourceTypes: resources.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} + +// FilterBlobs operation finds all blobs in the storage account whose tags match a given search expression. +// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +// To specify a container, eg. "@container=’containerName’ and Name = ‘C’" +func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + serviceFilterBlobsOptions := o.format() + resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions) + return resp, err +} + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential and Account SAS. +func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := s.credential().(type) { + case *azcore.TokenCredential: + conOptions := s.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: s.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
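+//
+// Each sub-request reports its own outcome in the returned Responses slice; a sketch,
+// where the sub-response field names are assumptions taken from the exported package:
+//
+//	resp, err := svcClient.SubmitBatch(context.TODO(), bb, nil)
+//	if err == nil {
+//		for _, sub := range resp.Responses {
+//			if sub.Error != nil {
+//				fmt.Println("sub-request failed:", sub.Error)
+//			}
+//		}
+//	}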
+func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go new file mode 100644 index 00000000000..20665fc2b79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/constants.go @@ -0,0 +1,92 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS +type SKUName = generated.SKUName + +const ( + SKUNameStandardLRS SKUName = generated.SKUNameStandardLRS + SKUNameStandardGRS SKUName = generated.SKUNameStandardGRS + SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS + SKUNameStandardZRS SKUName = generated.SKUNameStandardZRS + SKUNamePremiumLRS SKUName = generated.SKUNamePremiumLRS +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return generated.PossibleSKUNameValues() +} + +// ListContainersIncludeType defines values for ListContainersIncludeType +type ListContainersIncludeType = generated.ListContainersIncludeType + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata + ListContainersIncludeTypeDeleted ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted + ListContainersIncludeTypeSystem ListContainersIncludeType = generated.ListContainersIncludeTypeSystem +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. 
+func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return generated.PossibleListContainersIncludeTypeValues() +} + +// AccountKind defines values for AccountKind +type AccountKind = generated.AccountKind + +const ( + AccountKindStorage AccountKind = generated.AccountKindStorage + AccountKindBlobStorage AccountKind = generated.AccountKindBlobStorage + AccountKindStorageV2 AccountKind = generated.AccountKindStorageV2 + AccountKindFileStorage AccountKind = generated.AccountKindFileStorage + AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return generated.PossibleAccountKindValues() +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return generated.PossibleBlobGeoReplicationStatusValues() +} + +// PublicAccessType defines values for AccessType - private (default) or blob or container +type PublicAccessType = generated.PublicAccessType + +const ( + PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob + PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return generated.PossiblePublicAccessTypeValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go new file mode 100644 index 00000000000..b70724d7974 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -0,0 +1,361 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// UserDelegationCredential contains an account's name and its user delegation key. 
+type UserDelegationCredential = exported.UserDelegationCredential + +// UserDelegationKey contains UserDelegationKey. +type UserDelegationKey = generated.UserDelegationKey + +// KeyInfo contains KeyInfo struct. +type KeyInfo = generated.KeyInfo + +// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method. +type GetUserDelegationCredentialOptions struct { + // placeholder for future options +} + +func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions { + return nil +} + +// AccessConditions identifies container-specific access conditions which you optionally set. +type AccessConditions = exported.ContainerAccessConditions + +// BlobTag - a key/value pair on a blob +type BlobTag = generated.BlobTag + +// ContainerItem - An Azure Storage container returned from method Client.ListContainersSegment. +type ContainerItem = generated.ContainerItem + +// ContainerProperties - Properties of a container +type ContainerProperties = generated.ContainerProperties + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo = generated.CPKInfo + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo = generated.CPKScopeInfo + +// CreateContainerOptions contains the optional parameters for the container.Client.Create method. +type CreateContainerOptions = container.CreateOptions + +// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method. +type DeleteContainerOptions = container.DeleteOptions + +// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method. +type RestoreContainerOptions = container.RestoreOptions + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain. +type CORSRule = generated.CORSRule + +// FilterBlobSegment - The result of a Filter Blobs API call. +type FilterBlobSegment = generated.FilterBlobSegment + +// BlobTags - Blob tags +type BlobTags = generated.BlobTags + +// FilterBlobItem - Blob info returned from method Client.FilterBlobs. +type FilterBlobItem = generated.FilterBlobItem + +// GeoReplication - Geo-Replication information for the Secondary Storage Service. +type GeoReplication = generated.GeoReplication + +// RetentionPolicy - the retention policy which determines how long the associated data should persist. +type RetentionPolicy = generated.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs. +type Metrics = generated.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = generated.Logging + +// StaticWebsite - The properties that enable an account to host a static website. +type StaticWebsite = generated.StaticWebsite + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// StorageServiceStats - Stats for the storage service. 
+type StorageServiceStats = generated.StorageServiceStats + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListContainersOptions provides set of configurations for ListContainers operation. +type ListContainersOptions struct { + Include ListContainersInclude + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing operation did not return all containers + // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +// ListContainersInclude indicates what additional information the service should return with each container. +type ListContainersInclude struct { + // Tells the service whether to return metadata for each container. + Metadata bool + + // Tells the service whether to return soft-deleted containers. + Deleted bool + + // Tells the service whether to return system containers. + System bool +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + CORS []*CORSRule + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions. + DefaultServiceVersion *string + + // the retention policy which determines how long the associated data should persist. + DeleteRetentionPolicy *RetentionPolicy + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" + HourMetrics *Metrics + + // Azure Analytics Logging settings. 
+ // If version is not set - we default to "1.0" + Logging *Logging + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + // If version is not set - we default to "1.0" + MinuteMetrics *Metrics + + // The properties that enable an account to host a static website. + StaticWebsite *StaticWebsite +} + +func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { + if o == nil { + return generated.StorageServiceProperties{}, nil + } + + defaultVersion := to.Ptr[string]("1.0") + defaultAge := to.Ptr[int32](0) + emptyStr := to.Ptr[string]("") + + if o.CORS != nil { + for i := 0; i < len(o.CORS); i++ { + if o.CORS[i].AllowedHeaders == nil { + o.CORS[i].AllowedHeaders = emptyStr + } + if o.CORS[i].ExposedHeaders == nil { + o.CORS[i].ExposedHeaders = emptyStr + } + if o.CORS[i].MaxAgeInSeconds == nil { + o.CORS[i].MaxAgeInSeconds = defaultAge + } + } + } + + if o.HourMetrics != nil { + if o.HourMetrics.Version == nil { + o.HourMetrics.Version = defaultVersion + } + } + + if o.Logging != nil { + if o.Logging.Version == nil { + o.Logging.Version = defaultVersion + } + } + + if o.MinuteMetrics != nil { + if o.MinuteMetrics.Version == nil { + o.MinuteMetrics.Version = defaultVersion + } + + } + + return generated.StorageServiceProperties{ + CORS: o.CORS, + DefaultServiceVersion: o.DefaultServiceVersion, + DeleteRetentionPolicy: o.DeleteRetentionPolicy, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + StaticWebsite: o.StaticWebsite, + }, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetStatisticsOptions provides set of options for Client.GetStatistics +type GetStatisticsOptions struct { + // placeholder for future options +} + +func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FindBlobsByTags. +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. 
Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ServiceClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. +type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. +type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go new file mode 100644 index 00000000000..2dbf9716541 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go @@ -0,0 +1,63 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" +) + +// CreateContainerResponse contains the response from method container.Client.Create. 
+type CreateContainerResponse = generated.ContainerClientCreateResponse + +// DeleteContainerResponse contains the response from method container.Client.Delete +type DeleteContainerResponse = generated.ContainerClientDeleteResponse + +// RestoreContainerResponse contains the response from method container.Client.Restore +type RestoreContainerResponse = generated.ContainerClientRestoreResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse + +// ListContainersResponse contains the response from method Client.ListContainersSegment. +type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse = generated.ListContainersSegmentResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// GetStatisticsResponse contains the response from method Client.GetStatistics. +type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse + +// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. +type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json new file mode 100644 index 00000000000..c6259f7ab02 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/test-resources.json @@ -0,0 +1,579 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." 
+ } + } + }, + "variables": { + "mgmtApiVersion": "2022-09-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + 
"isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('immutableAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "immutableStorageWithVersioning": { + "enabled": true + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('immutableAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('immutableAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + 
"properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "deleteRetentionPolicy": { + "allowPermanentDelete": true, + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + 
"outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('immutableAccountName')]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), 
variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + 
"AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/CHANGELOG.md new file mode 100644 index 00000000000..af4b82990e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/CHANGELOG.md @@ -0,0 +1,14 @@ +## Release History + +### 1.0.0 (2023-05-09) + +### Features Added + +* This is the initial GA release of the `azqueue` library + + +### 0.1.0 (2023-02-15) + +### Features Added + +* This is the initial preview release of the `azqueue` library diff --git a/vendor/github.com/Azure/azure-event-hubs-go/v3/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/LICENSE.txt similarity index 98% rename from vendor/github.com/Azure/azure-event-hubs-go/v3/LICENSE rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/LICENSE.txt index 21071075c24..d1ca00f20a8 100644 --- a/vendor/github.com/Azure/azure-event-hubs-go/v3/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/LICENSE.txt @@ -18,4 +18,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/README.md new file mode 100644 index 00000000000..0877f9ae763 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/README.md @@ -0,0 +1,241 @@ +# Azure Queue Storage SDK for Go + +> Service Version: 2018-03-28 + +Azure Queue storage is a service for storing large numbers of messages that can be accessed from anywhere in +the world via authenticated calls using HTTP or HTTPS. +A single queue message can be up to 64 KiB in size, and a queue can contain millions of messages, +up to the total capacity limit of a storage account. + +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] + +## Getting started + +### Install the package + +Install the Azure Queue Storage SDK for Go with [go get][goget]: + +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue +``` + +If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +### Prerequisites + +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). + +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. + +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: + +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Authenticate the client + +In order to interact with the Azure Queue Storage service, you'll need to create an instance of the `azqueue.ServiceClient` type. 
The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services.
+
+```go
+// create a credential for authenticating with Azure Active Directory
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+// TODO: handle err
+
+// create an azqueue.ServiceClient for the specified storage account that uses the above credential
+client, err := azqueue.NewServiceClient("https://MYSTORAGEACCOUNT.queue.core.windows.net/", cred, nil)
+// TODO: handle err
+```
+
+Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps).
+
+## Key concepts
+The following components make up the Azure Queue Service:
+* The storage account itself
+* A queue within the storage account, which contains a set of messages
+* A message within a queue, in any format, of up to 64 KiB
+
+The Azure Storage Queues client library for Go allows you to interact with each of these components through the
+use of a dedicated client object.
+
+### Clients
+Two different clients are provided to interact with the various components of the Queue Service:
+1. ServiceClient -
+   this client represents interaction with the Azure storage account itself, and allows you to acquire preconfigured
+   client instances to access the queues within. It provides operations to retrieve and configure the account
+   properties as well as list, create, and delete queues within the account. To perform operations on a specific queue,
+   retrieve a client using the `NewQueueClient` method.
+2. QueueClient -
+   this client represents interaction with a specific queue (which need not exist yet). It provides operations to
+   create, delete, or configure a queue and includes operations to enqueue, dequeue, peek, delete, and update messages
+   within it.
+
+### Messages
+* **Enqueue** - Adds a message to the queue and optionally sets a visibility timeout for the message.
+* **Dequeue** - Retrieves a message from the queue and makes it invisible to other consumers.
+* **Peek** - Retrieves a message from the front of the queue, without changing the message visibility.
+* **Update** - Updates the visibility timeout of a message and/or the message contents.
+* **Delete** - Deletes a specified message from the queue.
+* **Clear** - Clears all messages from the queue.
+
+### Goroutine safety
+We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines.
+
+### About Queue metadata
+Queue metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters.
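+
+As a minimal illustration (not part of the original README), metadata can be supplied when a queue is created; this sketch assumes an existing `queueClient` as above, a `Metadata` map field on the create options, and `to.Ptr` from the `azcore/to` package:
+
+```go
+// create the queue with ASCII-only metadata name/value pairs
+md := map[string]*string{"department": to.Ptr("billing")}
+_, err = queueClient.Create(context.TODO(), &azqueue.CreateOptions{Metadata: md})
+// TODO: handle error
+```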
+
+### Additional concepts
+
+[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) |
+[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) |
+[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) |
+[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log)
+
+
+## Examples
+
+### Queue Manipulation
+
+```go
+const (
+	accountName = "MYSTORAGEACCOUNT"
+	accountKey  = "ACCOUNT_KEY"
+	queueName   = "samplequeue"
+)
+```
+
+### Exploring Queue Service APIs
+
+```go
+// shared key credential set up
+cred, err := azqueue.NewSharedKeyCredential(accountName, accountKey)
+// TODO: handle error
+
+// instantiate service client
+serviceClient, err := azqueue.NewServiceClientWithSharedKeyCredential("https://MYSTORAGEACCOUNT.queue.core.windows.net/", cred, nil)
+// TODO: handle error
+
+// 1. create queue
+queueClient := serviceClient.NewQueueClient(queueName)
+_, err = queueClient.Create(context.TODO(), nil)
+// TODO: handle error
+
+// 2. enqueue message
+message := "Hello world!"
+_, err = queueClient.EnqueueMessage(context.TODO(), message, nil)
+// TODO: handle error
+
+// 3. dequeue message
+_, err = queueClient.DequeueMessage(context.TODO(), nil)
+// TODO: handle error
+
+// 4. delete queue
+_, err = queueClient.Delete(context.TODO(), nil)
+// TODO: handle error
+```
+
+### Enumerating queues
+
+```go
+const (
+	account = "https://MYSTORAGEACCOUNT.queue.core.windows.net/"
+)
+
+// authenticate with Azure Active Directory
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+// TODO: handle error
+
+// create a client for the specified storage account
+client, err := azqueue.NewServiceClient(account, cred, nil)
+// TODO: handle error
+
+// queue listings are returned across multiple pages
+pager := client.NewListQueuesPager(nil)
+
+// continue fetching pages until no more remain
+for pager.More() {
+	resp, err := pager.NextPage(context.Background())
+	if err != nil {
+		// TODO: handle error
+		break
+	}
+	// print queue name
+	for _, queue := range resp.Queues {
+		fmt.Println(*queue.Name)
+	}
+}
+```
+
+## Troubleshooting
+
+All queue service operations will return an
+[*azcore.ResponseError][azcore_response_error] on failure with a
+populated `ErrorCode` field. Many of these errors are recoverable.
+The [queueerror][queue_error] package provides the possible Storage error codes
+along with various helper facilities for error handling.
+
+```go
+const (
+	connectionString = ""
+	queueName        = "samplequeue"
+)
+
+// create a client with the provided connection string
+client, err := azqueue.NewServiceClientFromConnectionString(connectionString, nil)
+// TODO: handle error
+
+// try to delete the queue, avoiding any potential race conditions with an in-progress or completed deletion
+_, err = client.DeleteQueue(context.TODO(), queueName, nil)
+
+if queueerror.HasCode(err, queueerror.QueueBeingDeleted, queueerror.QueueNotFound) {
+	// ignore any errors if the queue is being deleted or already has been deleted
+} else if err != nil {
+	// TODO: some other error
+}
+```
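+
+For cases where the `queueerror` helpers are not enough, the underlying error can be unwrapped directly. This is a minimal sketch (not from the original README) that assumes only the standard `errors` package and `azcore`:
+
+```go
+// inspect the raw storage error on any failed azqueue operation
+var respErr *azcore.ResponseError
+if errors.As(err, &respErr) {
+	fmt.Println("status:", respErr.StatusCode, "code:", respErr.ErrorCode)
+}
+```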
+
+## Next steps
+
+Get started with our [Queue samples][samples]. They contain complete examples of the above snippets and more.
+
+## Contributing
+
+See the [Storage CONTRIBUTING.md][storage_contrib] for details on building,
+testing, and contributing to this library.
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a [Contributor License Agreement (CLA)][cla] declaring that you have the right to, and actually do, grant us the rights to use your contribution.
+
+If you'd like to contribute to this library, please read the [contributing guide][contributing_guide] to learn more about how to build and test the code.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information, see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments.
+
+
+[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue
+[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue
+[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/queue-service-rest-api
+[godevdl]: https://go.dev/dl/
+[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them
+[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview
+[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
+[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
+[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
+[azure_cli]: https://docs.microsoft.com/cli/azure
+[azure_sub]: https://azure.microsoft.com/free/
+[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
+[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad
+[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError
+[samples]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/samples_test.go
+[queue_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/queueerror/error_codes.go
+[queue]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/queue_client.go
+[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/sas
+[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/service_client.go
+[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md
+[contributing_guide]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md
+[cla]: https://cla.microsoft.com
+[coc]: https://opensource.microsoft.com/codeofconduct/
+[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/
+[coc_contact]: mailto:opencode@microsoft.com
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/assets.json
new file mode 100644
index 00000000000..3b1cfb256fa
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/assets.json
@@ -0,0 +1,6 @@
+{
+  "AssetsRepo": "Azure/azure-sdk-assets",
+  "AssetsRepoPrefixPath": "go",
+  "TagPrefix": "go/storage/azqueue",
+  "Tag": "go/storage/azqueue_250a75f53b"
+}
diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/ci.yml new file mode 100644 index 00000000000..678839955e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/ci.yml @@ -0,0 +1,28 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azqueue + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azqueue + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azqueue' + RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/constants.go new file mode 100644 index 00000000000..7e77b10c872 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/constants.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azqueue + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" + +// GeoReplicationStatus - The status of the secondary location +type GeoReplicationStatus = generated.GeoReplicationStatus + +const ( + GeoReplicationStatusLive GeoReplicationStatus = generated.GeoReplicationStatusLive + GeoReplicationStatusBootstrap GeoReplicationStatus = generated.GeoReplicationStatusBootstrap + GeoReplicationStatusUnavailable GeoReplicationStatus = generated.GeoReplicationStatusUnavailable +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/doc.go new file mode 100644 index 00000000000..004d7a64ea2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/doc.go @@ -0,0 +1,168 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +/* + +Package azqueue can access an Azure Queue Storage. + +The azqueue package is capable of :- + - Creating, deleting, and clearing queues in an account + - Enqueuing, dequeuing, and editing messages in a queue + - Creating Shared Access Signature for authentication + +Types of Resources + +The azqueue package allows you to interact with three types of resources :- + +* Azure storage accounts. +* Queues within those storage accounts. +* Messages within those queues. + +The Azure Queue Storage (azqueue) client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's queue service endpoint URL and a credential that allows you to access the account. + +Types of Credentials + +The clients support different forms of authentication. +The azqueue library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String, +or authorization with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. +This can be found in your storage account in the Azure Portal under the "Access Keys" section. 
+ +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.queue.core.windows.net/", accountName) + + cred, err := azqueue.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := azqueue.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azqueue.NewServiceClientFromConnectionString(connStr, nil) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASURL() function. + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.queue.core.windows.net/", accountName) + + cred, err := azqueue.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := azqueue.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.URL()) + + // Alternatively, you can create SAS on the fly + + resources := azqueue.AccountResourceTypes{Service: true} + permission := azqueue.AccountSASPermissions{Read: true} + expiry := time.Now().UTC().AddDate(0, 0, 1) // the SAS expires 24 hours from now + serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, expiry, nil) + handle(err) + + serviceClientWithSAS, err := azqueue.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.URL()) + +Types of Clients + +There are two different clients provided to interact with the various components of the Queue Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete queues within the account. + +2. **`QueueClient`** + * Get and set queue access settings and metadata. + * Enqueue, Dequeue and Peek messages within a queue. + * Update and Delete messages. + +Examples + + // Your account name and key can be obtained from the Azure Portal.
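+ // They are read here from environment variables so the example stays runnable without hard-coded secrets.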
+ accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + cred, err := azqueue.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for queue endpoints is usually in the form: http(s)://.queue.core.windows.net/ + serviceClient, err := azqueue.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.queue.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a queue ===== + + // First, create a queue client, and use the Create method to create a new queue in your account + queueClient, err := serviceClient.NewQueueClient("testqueue") + handle(err) + + // All APIs accept an options bag struct as a parameter. + // The options bag struct allows you to specify optional parameters such as metadata, access, etc. + // If you want to use the default options, pass in nil. + _, err = queueClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Enqueue and Dequeue a message ===== + message := "Hello world!" + + // send message to queue + _, err = queueClient.EnqueueMessage(context.TODO(), message, nil) + handle(err) + + // dequeue message from queue, you can also use `DequeueMessages()` to dequeue more than one message (up to 32) + _, err = queueClient.DequeueMessage(context.TODO(), nil) + handle(err) + + // ===== 3. Peek messages ===== + // You can also peek messages from the queue (without removing them), you can peek a maximum of 32 messages. + + opts := azqueue.PeekMessagesOptions{NumberOfMessages: to.Ptr(int32(4))} + _, err = queueClient.PeekMessages(context.TODO(), &opts) + handle(err) + + // Delete the queue. + _, err = queueClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azqueue diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base/clients.go new file mode 100644 index 00000000000..a979ac5444d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base/clients.go @@ -0,0 +1,59 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" +) + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewClient[T any](inner *T) *Client[T] { + return &Client[T]{inner: inner} +} + +func NewServiceClient(queueURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(queueURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewQueueClient(queueURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.QueueClient, generated.MessagesClient] { + return &CompositeClient[generated.QueueClient, generated.MessagesClient]{ + innerT: generated.NewQueueClient(queueURL, pipeline), + innerU: generated.NewMessagesClient(runtime.JoinPaths(queueURL, "messages"), pipeline), + sharedKey: sharedKey, + } +} + +type CompositeClient[T, U any] struct { + innerT *T + innerU *U + sharedKey *exported.SharedKeyCredential +} + +func InnerClients[T, U any](client *CompositeClient[T, U]) (*T, *U) { + return client.innerT, client.innerU +} + +func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential { + return client.sharedKey +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/access_policy.go new file mode 100644 index 00000000000..5b154e97e2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/access_policy.go @@ -0,0 +1,57 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a queue's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Update, Process bool +} + +// String produces the access policy permission string for an Azure Storage queue. +// Call this method to set AccessPolicy's Permission field. +func (p *AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Update { + b.WriteRune('u') + } + if p.Process { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. 
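+// Any rune other than 'r', 'a', 'u', or 'p' results in an error.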
+func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'u': + p.Update = true + case 'p': + p.Process = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/shared_key_credential.go new file mode 100644 index 00000000000..a8d648104ca --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/shared_key_credential.go @@ -0,0 +1,218 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// AccountName returns the Storage account's name. +func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
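+// The message is signed with HMAC-SHA256, keyed by the account's decoded key, and the signature is returned base64-encoded.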
+func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. +func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/version.go new file mode 100644 index 00000000000..abf66d14211 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +const ( + ModuleName = "azqueue" + ModuleVersion = "v1.0.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/autorest.md new file mode 100644 index 00000000000..3d8be8e852f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/autorest.md @@ -0,0 +1,150 @@ +# Code Generation - Azure Queue SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.QueueStorage/preview/2018-03-28/queue.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.45" +``` + +### Remove QueueName from parameter list since it is not needed + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{queueName}/messages/{messageid}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/QueueName") && false == param['$ref'].endsWith("#/parameters/MessageId"))}); + } + else if (property.includes('/{queueName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/QueueName"))}); + } + } +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "QueueGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Remove pager method (since we implement it ourselves on the client layer) and export various generated methods in service client to utilize them in higher layers + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListQueuesSegmentPager\(.+\/\/ listQueuesSegmentCreateRequest creates the ListQueuesSegment request/s, `// ListQueuesSegmentCreateRequest creates the ListQueuesFlatSegment ListQueuesSegment`). + replace(/\(client \*ServiceClient\) listQueuesSegmentCreateRequest\(/, `(client *ServiceClient) ListQueuesSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listQueuesSegmentHandleResponse\(/, `(client *ServiceClient) ListQueuesSegmentHandleResponse(`); +``` + +### Change `VisibilityTimeout` parameter in queues to be options + +``` yaml +directive: +- from: swagger-document + where: $.parameters.VisibilityTimeoutRequired + transform: > + $.required = false; +``` + +### Change CORS acronym to be all caps + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. 
+ replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Remove `Item` suffix + +``` yaml +directive: +- rename-model: + from: DequeuedMessageItem + to: DequeuedMessage +- rename-model: + from: QueueItem + to: Queue +- rename-model: + from: PeekedMessageItem + to: PeekedMessage +``` + +### Remove `List` suffix + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/QueueMessagesList/g, "Messages"); +``` + +### Remove `Item` suffix + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/QueueItems/g, "Queues"); +``` + +### Remove `Queue` prefix + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/QueueGeoReplicationStatus/g, "GeoReplicationStatus"); +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/build.go new file mode 100644 index 00000000000..57f112001bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/queue_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/queue_client.go new file mode 100644 index 00000000000..0820c93c0df --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/queue_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *QueueClient) Endpoint() string { + return client.endpoint +} + +func (client *QueueClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/service_client.go new file mode 100644 index 00000000000..1f449b955e8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_constants.go new file mode 100644 index 00000000000..167981467b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_constants.go @@ -0,0 +1,154 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +// GeoReplicationStatus - The status of the secondary location +type GeoReplicationStatus string + +const ( + GeoReplicationStatusLive GeoReplicationStatus = "live" + GeoReplicationStatusBootstrap GeoReplicationStatus = "bootstrap" + GeoReplicationStatusUnavailable GeoReplicationStatus = "unavailable" +) + +// PossibleGeoReplicationStatusValues returns the possible values for the GeoReplicationStatus const type. +func PossibleGeoReplicationStatusValues() []GeoReplicationStatus { + return []GeoReplicationStatus{ + GeoReplicationStatusLive, + GeoReplicationStatusBootstrap, + GeoReplicationStatusUnavailable, + } +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMarker 
StorageErrorCode = "InvalidMarker" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMessageNotFound StorageErrorCode = "MessageNotFound" + StorageErrorCodeMessageTooLarge StorageErrorCode = "MessageTooLarge" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePopReceiptMismatch StorageErrorCode = "PopReceiptMismatch" + StorageErrorCodeQueueAlreadyExists StorageErrorCode = "QueueAlreadyExists" + StorageErrorCodeQueueBeingDeleted StorageErrorCode = "QueueBeingDeleted" + StorageErrorCodeQueueDisabled StorageErrorCode = "QueueDisabled" + StorageErrorCodeQueueNotEmpty StorageErrorCode = "QueueNotEmpty" + StorageErrorCodeQueueNotFound StorageErrorCode = "QueueNotFound" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. 
+func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMarker, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMessageNotFound, + StorageErrorCodeMessageTooLarge, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePopReceiptMismatch, + StorageErrorCodeQueueAlreadyExists, + StorageErrorCodeQueueBeingDeleted, + StorageErrorCodeQueueDisabled, + StorageErrorCodeQueueNotEmpty, + StorageErrorCodeQueueNotFound, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeServerBusy, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messageid_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messageid_client.go new file mode 100644 index 00000000000..dc85dd32663 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messageid_client.go @@ -0,0 +1,176 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "time" +) + +// MessageIDClient contains the methods for the MessageID group. +// Don't use this type directly, use NewMessageIDClient() instead. 
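+// It issues the Delete and Update operations against the URL of a single queue message.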
+type MessageIDClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewMessageIDClient creates a new instance of MessageIDClient with the specified values. +// - endpoint - The URL of the service account, queue or message that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewMessageIDClient(endpoint string, pl runtime.Pipeline) *MessageIDClient { + client := &MessageIDClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// Delete - The Delete operation deletes the specified message. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - popReceipt - Required. Specifies the valid pop receipt value returned from an earlier call to the Get Messages or Update +// Message operation. +// - options - MessageIDClientDeleteOptions contains the optional parameters for the MessageIDClient.Delete method. +func (client *MessageIDClient) Delete(ctx context.Context, popReceipt string, options *MessageIDClientDeleteOptions) (MessageIDClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, popReceipt, options) + if err != nil { + return MessageIDClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessageIDClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return MessageIDClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *MessageIDClient) deleteCreateRequest(ctx context.Context, popReceipt string, options *MessageIDClientDeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("popreceipt", popReceipt) + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *MessageIDClient) deleteHandleResponse(resp *http.Response) (MessageIDClientDeleteResponse, error) { + result := MessageIDClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessageIDClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Update - The Update operation was introduced with version 2011-08-18 of the Queue service API. The Update Message operation +// updates the visibility timeout of a message. You can also use this operation to +// update the contents of a message. A message must be in a format that can be included in an XML request with UTF-8 encoding, +// and the encoded message can be up to 64KB in size. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - popReceipt - Required. 
Specifies the valid pop receipt value returned from an earlier call to the Get Messages or Update +// Message operation. +// - queueMessage - A Message object which can be stored in a Queue +// - options - MessageIDClientUpdateOptions contains the optional parameters for the MessageIDClient.Update method. +func (client *MessageIDClient) Update(ctx context.Context, popReceipt string, queueMessage QueueMessage, options *MessageIDClientUpdateOptions) (MessageIDClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, popReceipt, queueMessage, options) + if err != nil { + return MessageIDClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessageIDClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return MessageIDClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *MessageIDClient) updateCreateRequest(ctx context.Context, popReceipt string, queueMessage QueueMessage, options *MessageIDClientUpdateOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("popreceipt", popReceipt) + if options != nil && options.Visibilitytimeout != nil { + reqQP.Set("visibilitytimeout", strconv.FormatInt(int64(*options.Visibilitytimeout), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, queueMessage) +} + +// updateHandleResponse handles the Update response. +func (client *MessageIDClient) updateHandleResponse(resp *http.Response) (MessageIDClientUpdateResponse, error) { + result := MessageIDClientUpdateResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessageIDClientUpdateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-popreceipt"); val != "" { + result.PopReceipt = &val + } + if val := resp.Header.Get("x-ms-time-next-visible"); val != "" { + timeNextVisible, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessageIDClientUpdateResponse{}, err + } + result.TimeNextVisible = &timeNextVisible + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messages_client.go new file mode 100644 index 00000000000..fd3f79ef8cf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_messages_client.go @@ -0,0 +1,299 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "time" +) + +// MessagesClient contains the methods for the Messages group. +// Don't use this type directly, use NewMessagesClient() instead. +type MessagesClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewMessagesClient creates a new instance of MessagesClient with the specified values. +// - endpoint - The URL of the service account, queue or message that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewMessagesClient(endpoint string, pl runtime.Pipeline) *MessagesClient { + client := &MessagesClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// Clear - The Clear operation deletes all messages from the specified queue. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - MessagesClientClearOptions contains the optional parameters for the MessagesClient.Clear method. +func (client *MessagesClient) Clear(ctx context.Context, options *MessagesClientClearOptions) (MessagesClientClearResponse, error) { + req, err := client.clearCreateRequest(ctx, options) + if err != nil { + return MessagesClientClearResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessagesClientClearResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return MessagesClientClearResponse{}, runtime.NewResponseError(resp) + } + return client.clearHandleResponse(resp) +} + +// clearCreateRequest creates the Clear request. +func (client *MessagesClient) clearCreateRequest(ctx context.Context, options *MessagesClientClearOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// clearHandleResponse handles the Clear response. +func (client *MessagesClient) clearHandleResponse(resp *http.Response) (MessagesClientClearResponse, error) { + result := MessagesClientClearResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessagesClientClearResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Dequeue - The Dequeue operation retrieves one or more messages from the front of the queue. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - MessagesClientDequeueOptions contains the optional parameters for the MessagesClient.Dequeue method. 
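+// Messages returned by Dequeue become invisible to other callers for the visibility timeout and must be deleted with their pop receipt.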
+func (client *MessagesClient) Dequeue(ctx context.Context, options *MessagesClientDequeueOptions) (MessagesClientDequeueResponse, error) { + req, err := client.dequeueCreateRequest(ctx, options) + if err != nil { + return MessagesClientDequeueResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessagesClientDequeueResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return MessagesClientDequeueResponse{}, runtime.NewResponseError(resp) + } + return client.dequeueHandleResponse(resp) +} + +// dequeueCreateRequest creates the Dequeue request. +func (client *MessagesClient) dequeueCreateRequest(ctx context.Context, options *MessagesClientDequeueOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.NumberOfMessages != nil { + reqQP.Set("numofmessages", strconv.FormatInt(int64(*options.NumberOfMessages), 10)) + } + if options != nil && options.Visibilitytimeout != nil { + reqQP.Set("visibilitytimeout", strconv.FormatInt(int64(*options.Visibilitytimeout), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// dequeueHandleResponse handles the Dequeue response. +func (client *MessagesClient) dequeueHandleResponse(resp *http.Response) (MessagesClientDequeueResponse, error) { + result := MessagesClientDequeueResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessagesClientDequeueResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return MessagesClientDequeueResponse{}, err + } + return result, nil +} + +// Enqueue - The Enqueue operation adds a new message to the back of the message queue. A visibility timeout can also be specified +// to make the message invisible until the visibility timeout expires. A message must +// be in a format that can be included in an XML request with UTF-8 encoding. The encoded message can be up to 64 KB in size +// for versions 2011-08-18 and newer, or 8 KB in size for previous versions. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - queueMessage - A Message object which can be stored in a Queue +// - options - MessagesClientEnqueueOptions contains the optional parameters for the MessagesClient.Enqueue method. 
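+// The message body is marshalled as XML by enqueueCreateRequest below.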
+func (client *MessagesClient) Enqueue(ctx context.Context, queueMessage QueueMessage, options *MessagesClientEnqueueOptions) (MessagesClientEnqueueResponse, error) { + req, err := client.enqueueCreateRequest(ctx, queueMessage, options) + if err != nil { + return MessagesClientEnqueueResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessagesClientEnqueueResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return MessagesClientEnqueueResponse{}, runtime.NewResponseError(resp) + } + return client.enqueueHandleResponse(resp) +} + +// enqueueCreateRequest creates the Enqueue request. +func (client *MessagesClient) enqueueCreateRequest(ctx context.Context, queueMessage QueueMessage, options *MessagesClientEnqueueOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Visibilitytimeout != nil { + reqQP.Set("visibilitytimeout", strconv.FormatInt(int64(*options.Visibilitytimeout), 10)) + } + if options != nil && options.MessageTimeToLive != nil { + reqQP.Set("messagettl", strconv.FormatInt(int64(*options.MessageTimeToLive), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, queueMessage) +} + +// enqueueHandleResponse handles the Enqueue response. +func (client *MessagesClient) enqueueHandleResponse(resp *http.Response) (MessagesClientEnqueueResponse, error) { + result := MessagesClientEnqueueResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessagesClientEnqueueResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return MessagesClientEnqueueResponse{}, err + } + return result, nil +} + +// Peek - The Peek operation retrieves one or more messages from the front of the queue, but does not alter the visibility +// of the message. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - MessagesClientPeekOptions contains the optional parameters for the MessagesClient.Peek method. +func (client *MessagesClient) Peek(ctx context.Context, options *MessagesClientPeekOptions) (MessagesClientPeekResponse, error) { + req, err := client.peekCreateRequest(ctx, options) + if err != nil { + return MessagesClientPeekResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return MessagesClientPeekResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return MessagesClientPeekResponse{}, runtime.NewResponseError(resp) + } + return client.peekHandleResponse(resp) +} + +// peekCreateRequest creates the Peek request. 
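+// It sets peekonly=true so the returned messages remain visible to other consumers.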
+func (client *MessagesClient) peekCreateRequest(ctx context.Context, options *MessagesClientPeekOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("peekonly", "true") + if options != nil && options.NumberOfMessages != nil { + reqQP.Set("numofmessages", strconv.FormatInt(int64(*options.NumberOfMessages), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// peekHandleResponse handles the Peek response. +func (client *MessagesClient) peekHandleResponse(resp *http.Response) (MessagesClientPeekResponse, error) { + result := MessagesClientPeekResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return MessagesClientPeekResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return MessagesClientPeekResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models.go new file mode 100644 index 00000000000..560ae210d50 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models.go @@ -0,0 +1,430 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import "time" + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CORSRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. 
The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// DequeuedMessage - The object returned in the QueueMessageList array when calling Get Messages on a Queue. +type DequeuedMessage struct { + // REQUIRED; The number of times the message has been dequeued. + DequeueCount *int64 `xml:"DequeueCount"` + + // REQUIRED; The time that the Message will expire and be automatically deleted. + ExpirationTime *time.Time `xml:"ExpirationTime"` + + // REQUIRED; The time the Message was inserted into the Queue. + InsertionTime *time.Time `xml:"InsertionTime"` + + // REQUIRED; The Id of the Message. + MessageID *string `xml:"MessageId"` + + // REQUIRED; The content of the Message. + MessageText *string `xml:"MessageText"` + + // REQUIRED; This value is required to delete the Message. If deletion fails using this popreceipt then the message has been + // dequeued by another client. + PopReceipt *string `xml:"PopReceipt"` + + // REQUIRED; The time that the message will again become visible in the Queue. + TimeNextVisible *time.Time `xml:"TimeNextVisible"` +} + +// EnqueuedMessage - The object returned in the QueueMessageList array when calling Put Message on a Queue +type EnqueuedMessage struct { + // REQUIRED; The time that the Message will expire and be automatically deleted. + ExpirationTime *time.Time `xml:"ExpirationTime"` + + // REQUIRED; The time the Message was inserted into the Queue. + InsertionTime *time.Time `xml:"InsertionTime"` + + // REQUIRED; The Id of the Message. + MessageID *string `xml:"MessageId"` + + // REQUIRED; This value is required to delete the Message. If deletion fails using this popreceipt then the message has been + // dequeued by another client. + PopReceipt *string `xml:"PopReceipt"` + + // REQUIRED; The time that the message will again become visible in the Queue. + TimeNextVisible *time.Time `xml:"TimeNextVisible"` +} + +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *GeoReplicationStatus `xml:"Status"` +} + +// ListQueuesSegmentResponse - The object returned when calling List Queues on a Queue Service. +type ListQueuesSegmentResponse struct { + // REQUIRED + MaxResults *int32 `xml:"MaxResults"` + + // REQUIRED + NextMarker *string `xml:"NextMarker"` + + // REQUIRED + Prefix *string `xml:"Prefix"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + Queues []*Queue `xml:"Queues>Queue"` +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged.
+ Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// MessageIDClientDeleteOptions contains the optional parameters for the MessageIDClient.Delete method. +type MessageIDClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// MessageIDClientUpdateOptions contains the optional parameters for the MessageIDClient.Update method. +type MessageIDClientUpdateOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 + // Optional. Specifies the new visibility timeout value, in seconds, relative to server time. The default value is 30 seconds. + // A specified value must be larger than or equal to 1 second, and cannot be + // larger than 7 days, or larger than 2 hours on REST protocol versions prior to version 2011-08-18. The visibility timeout + // of a message can be set to a value later than the expiry time. + Visibilitytimeout *int32 +} + +// MessagesClientClearOptions contains the optional parameters for the MessagesClient.Clear method. +type MessagesClientClearOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// MessagesClientDequeueOptions contains the optional parameters for the MessagesClient.Dequeue method. +type MessagesClientDequeueOptions struct { + // Optional. A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of + // 32. If fewer are visible, the visible messages are returned. By default, a single + // message is retrieved from the queue with this operation. + NumberOfMessages *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 + // Optional. Specifies the new visibility timeout value, in seconds, relative to server time. The default value is 30 seconds. + // A specified value must be larger than or equal to 1 second, and cannot be + // larger than 7 days, or larger than 2 hours on REST protocol versions prior to version 2011-08-18. The visibility timeout + // of a message can be set to a value later than the expiry time. + Visibilitytimeout *int32 +} + +// MessagesClientEnqueueOptions contains the optional parameters for the MessagesClient.Enqueue method. +type MessagesClientEnqueueOptions struct { + // Optional. Specifies the time-to-live interval for the message, in seconds. 
Prior to version 2017-07-29, the maximum time-to-live + // allowed is 7 days. For version 2017-07-29 or later, the maximum + // time-to-live can be any positive number, as well as -1 indicating that the message does not expire. If this parameter is + // omitted, the default time-to-live is 7 days. + MessageTimeToLive *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 + // Optional. If specified, the request must be made using an x-ms-version of 2011-08-18 or later. If not specified, the default + // value is 0. Specifies the new visibility timeout value, in seconds, + // relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility + // timeout of a message cannot be set to a value later than the expiry time. + // visibilitytimeout should be set to a value smaller than the time-to-live value. + Visibilitytimeout *int32 +} + +// MessagesClientPeekOptions contains the optional parameters for the MessagesClient.Peek method. +type MessagesClientPeekOptions struct { + // Optional. A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of + // 32. If fewer are visible, the visible messages are returned. By default, a single + // message is retrieved from the queue with this operation. + NumberOfMessages *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for queues +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Queue service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// PeekedMessage - The object returned in the QueueMessageList array when calling Peek Messages on a Queue +type PeekedMessage struct { + // REQUIRED; The number of times the message has been dequeued. + DequeueCount *int64 `xml:"DequeueCount"` + + // REQUIRED; The time that the Message will expire and be automatically deleted. + ExpirationTime *time.Time `xml:"ExpirationTime"` + + // REQUIRED; The time the Message was inserted into the Queue. + InsertionTime *time.Time `xml:"InsertionTime"` + + // REQUIRED; The Id of the Message. + MessageID *string `xml:"MessageId"` + + // REQUIRED; The content of the Message. + MessageText *string `xml:"MessageText"` +} + +// Queue - An Azure Storage Queue. +type Queue struct { + // REQUIRED; The name of the Queue. + Name *string `xml:"Name"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` +} + +// QueueClientCreateOptions contains the optional parameters for the QueueClient.Create method. +type QueueClientCreateOptions struct { + // Optional. Include this parameter to specify that the queue's metadata be returned as part of the response body. 
Note that + // metadata requested with this parameter must be stored in accordance with the + // naming restrictions imposed by the 2009-09-19 version of the Queue service. Beginning with this version, all metadata names + // must adhere to the naming conventions for C# identifiers. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// QueueClientDeleteOptions contains the optional parameters for the QueueClient.Delete method. +type QueueClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// QueueClientGetAccessPolicyOptions contains the optional parameters for the QueueClient.GetAccessPolicy method. +type QueueClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// QueueClientGetPropertiesOptions contains the optional parameters for the QueueClient.GetProperties method. +type QueueClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// QueueClientSetAccessPolicyOptions contains the optional parameters for the QueueClient.SetAccessPolicy method. +type QueueClientSetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// QueueClientSetMetadataOptions contains the optional parameters for the QueueClient.SetMetadata method. +type QueueClientSetMetadataOptions struct { + // Optional. Include this parameter to specify that the queue's metadata be returned as part of the response body. Note that + // metadata requested with this parameter must be stored in accordance with the + // naming restrictions imposed by the 2009-09-19 version of the Queue service. Beginning with this version, all metadata names + // must adhere to the naming conventions for C# identifiers. + Metadata map[string]*string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. 
For more information, see + Timeout *int32 +} + +// QueueMessage - A Message object which can be stored in a Queue +type QueueMessage struct { + // REQUIRED; The content of the message + MessageText *string `xml:"MessageText"` +} + +// RetentionPolicy - the retention policy +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// ServiceClientListQueuesSegmentOptions contains the optional parameters for the ServiceClient.NewListQueuesSegmentPager +// method. +type ServiceClientListQueuesSegmentOptions struct { + // Include this parameter to specify that the queues' metadata be returned as part of the response body. + Include []string + // A string value that identifies the portion of the list of queues to be returned with the next listing operation. The operation + // returns the NextMarker value within the response body if the listing + // operation did not return all queues remaining to be listed with the current page. The NextMarker value can be used as the + // value for the marker parameter in a subsequent call to request the next page + // of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of queues to return. If the request does not specify maxresults, or specifies a value greater + // than 5000, the server will return up to 5000 items. Note that if the listing + // operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will return + // fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only queues whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // The The timeout parameter is expressed in seconds. For more information, see + Timeout *int32 +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; The access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // A summary of request statistics grouped by API in hourly aggregates for queues + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in minute aggregates for queues + MinuteMetrics *Metrics `xml:"MinuteMetrics"` +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models_serde.go new file mode 100644 index 00000000000..b3db14904ea --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_models_serde.go @@ -0,0 +1,281 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type DequeuedMessage. 
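+// These serde helpers all follow the same generated pattern: a local
+// `type alias X` strips the method set so the encoder cannot recurse back
+// into MarshalXML/UnmarshalXML, while the auxiliary struct swaps each
+// time.Time field for a timeRFC1123 (or timeRFC3339) wrapper so the
+// timestamps round-trip in the wire format the Queue service uses.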
+func (d DequeuedMessage) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias DequeuedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + TimeNextVisible *timeRFC1123 `xml:"TimeNextVisible"` + }{ + alias: (*alias)(&d), + ExpirationTime: (*timeRFC1123)(d.ExpirationTime), + InsertionTime: (*timeRFC1123)(d.InsertionTime), + TimeNextVisible: (*timeRFC1123)(d.TimeNextVisible), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type DequeuedMessage. +func (d *DequeuedMessage) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias DequeuedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + TimeNextVisible *timeRFC1123 `xml:"TimeNextVisible"` + }{ + alias: (*alias)(d), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + d.ExpirationTime = (*time.Time)(aux.ExpirationTime) + d.InsertionTime = (*time.Time)(aux.InsertionTime) + d.TimeNextVisible = (*time.Time)(aux.TimeNextVisible) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type EnqueuedMessage. +func (e EnqueuedMessage) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias EnqueuedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + TimeNextVisible *timeRFC1123 `xml:"TimeNextVisible"` + }{ + alias: (*alias)(&e), + ExpirationTime: (*timeRFC1123)(e.ExpirationTime), + InsertionTime: (*timeRFC1123)(e.InsertionTime), + TimeNextVisible: (*timeRFC1123)(e.TimeNextVisible), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type EnqueuedMessage. +func (e *EnqueuedMessage) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias EnqueuedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + TimeNextVisible *timeRFC1123 `xml:"TimeNextVisible"` + }{ + alias: (*alias)(e), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + e.ExpirationTime = (*time.Time)(aux.ExpirationTime) + e.InsertionTime = (*time.Time)(aux.InsertionTime) + e.TimeNextVisible = (*time.Time)(aux.TimeNextVisible) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. +func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListQueuesSegmentResponse. 
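+// Note the *[]*Queue indirection in the aux struct below: the pointer is
+// populated only when l.Queues is non-nil, so a listing with no queues omits
+// the <Queues> element entirely rather than emitting an empty one.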
+func (l ListQueuesSegmentResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListQueuesSegmentResponse + aux := &struct { + *alias + Queues *[]*Queue `xml:"Queues>Queue"` + }{ + alias: (*alias)(&l), + } + if l.Queues != nil { + aux.Queues = &l.Queues + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type PeekedMessage. +func (p PeekedMessage) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias PeekedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + }{ + alias: (*alias)(&p), + ExpirationTime: (*timeRFC1123)(p.ExpirationTime), + InsertionTime: (*timeRFC1123)(p.InsertionTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type PeekedMessage. +func (p *PeekedMessage) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias PeekedMessage + aux := &struct { + *alias + ExpirationTime *timeRFC1123 `xml:"ExpirationTime"` + InsertionTime *timeRFC1123 `xml:"InsertionTime"` + }{ + alias: (*alias)(p), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + p.ExpirationTime = (*time.Time)(aux.ExpirationTime) + p.InsertionTime = (*time.Time)(aux.InsertionTime) + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type Queue. +func (q *Queue) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias Queue + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(q), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + q.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. +func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. 
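+// The CORS slice below gets the same nil-pointer treatment as Queues above.
+// As an illustrative sketch of building the type (using to.Ptr from
+// azcore/to for the pointer-valued fields; the values are examples only):
+//
+//    props := StorageServiceProperties{
+//        Logging: &Logging{
+//            Version:         to.Ptr("1.0"),
+//            Delete:          to.Ptr(true),
+//            Read:            to.Ptr(true),
+//            Write:           to.Ptr(true),
+//            RetentionPolicy: &RetentionPolicy{Enabled: to.Ptr(true), Days: to.Ptr(int32(7))},
+//        },
+//    }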
+func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_queue_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_queue_client.go new file mode 100644 index 00000000000..7fc10f7f2fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_queue_client.go @@ -0,0 +1,432 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// QueueClient contains the methods for the Queue group. +// Don't use this type directly, use NewQueueClient() instead. +type QueueClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewQueueClient creates a new instance of QueueClient with the specified values. +// - endpoint - The URL of the service account, queue or message that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewQueueClient(endpoint string, pl runtime.Pipeline) *QueueClient { + client := &QueueClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// Create - creates a new queue under the given account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - QueueClientCreateOptions contains the optional parameters for the QueueClient.Create method. +func (client *QueueClient) Create(ctx context.Context, options *QueueClientCreateOptions) (QueueClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, options) + if err != nil { + return QueueClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated, http.StatusNoContent) { + return QueueClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
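+// Create issues a PUT against the queue URL, sending each metadata entry as
+// an individual x-ms-meta-<key> request header. An illustrative call (the
+// account and queue names, and the pipeline pl, are placeholders):
+//
+//    client := NewQueueClient("https://<account>.queue.core.windows.net/<queue>", pl)
+//    _, err := client.Create(ctx, &QueueClientCreateOptions{
+//        Metadata: map[string]*string{"owner": to.Ptr("keda")},
+//    })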
+func (client *QueueClient) createCreateRequest(ctx context.Context, options *QueueClientCreateOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *QueueClient) createHandleResponse(resp *http.Response) (QueueClientCreateResponse, error) { + result := QueueClientCreateResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - operation permanently deletes the specified queue +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - QueueClientDeleteOptions contains the optional parameters for the QueueClient.Delete method. +func (client *QueueClient) Delete(ctx context.Context, options *QueueClientDeleteOptions) (QueueClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options) + if err != nil { + return QueueClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return QueueClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *QueueClient) deleteCreateRequest(ctx context.Context, options *QueueClientDeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
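+// A successful Delete returns 204 No Content, so the handler below only lifts
+// the tracing headers (x-ms-request-id, x-ms-version and Date, the latter
+// parsed as RFC 1123) into the response type; there is no body to unmarshal.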
+func (client *QueueClient) deleteHandleResponse(resp *http.Response) (QueueClientDeleteResponse, error) { + result := QueueClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - returns details about any stored access policies specified on the queue that may be used with Shared +// Access Signatures. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - QueueClientGetAccessPolicyOptions contains the optional parameters for the QueueClient.GetAccessPolicy method. +func (client *QueueClient) GetAccessPolicy(ctx context.Context, options *QueueClientGetAccessPolicyOptions) (QueueClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, options) + if err != nil { + return QueueClientGetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return QueueClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *QueueClient) getAccessPolicyCreateRequest(ctx context.Context, options *QueueClientGetAccessPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. +func (client *QueueClient) getAccessPolicyHandleResponse(resp *http.Response) (QueueClientGetAccessPolicyResponse, error) { + result := QueueClientGetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return QueueClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetProperties - Retrieves user-defined metadata and queue properties on the specified queue. Metadata is associated with +// the queue as name-values pairs. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - QueueClientGetPropertiesOptions contains the optional parameters for the QueueClient.GetProperties method. 
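+//
+// Although the request uses comp=metadata, the handler also surfaces the
+// x-ms-approximate-messages-count header. An illustrative (sketch-only) read:
+//
+//    resp, err := client.GetProperties(ctx, nil)
+//    if err == nil && resp.ApproximateMessagesCount != nil {
+//        fmt.Printf("~%d messages\n", *resp.ApproximateMessagesCount) // approximate, not transactional
+//    }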
+func (client *QueueClient) GetProperties(ctx context.Context, options *QueueClientGetPropertiesOptions) (QueueClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return QueueClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return QueueClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *QueueClient) getPropertiesCreateRequest(ctx context.Context, options *QueueClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *QueueClient) getPropertiesHandleResponse(resp *http.Response) (QueueClientGetPropertiesResponse, error) { + result := QueueClientGetPropertiesResponse{} + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-approximate-messages-count"); val != "" { + approximateMessagesCount32, err := strconv.ParseInt(val, 10, 32) + approximateMessagesCount := int32(approximateMessagesCount32) + if err != nil { + return QueueClientGetPropertiesResponse{}, err + } + result.ApproximateMessagesCount = &approximateMessagesCount + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientGetPropertiesResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - sets stored access policies for the queue that may be used with Shared Access Signatures +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - queueACL - the acls for the queue +// - options - QueueClientSetAccessPolicyOptions contains the optional parameters for the QueueClient.SetAccessPolicy method. 
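+//
+// An illustrative call that installs a single 48-hour policy (the policy ID,
+// permission string and time window below are examples, not defaults):
+//
+//    start := time.Now().UTC()
+//    expiry := start.Add(48 * time.Hour)
+//    acl := []*SignedIdentifier{{
+//        ID:           to.Ptr("policy-1"),
+//        AccessPolicy: &AccessPolicy{Start: &start, Expiry: &expiry, Permission: to.Ptr("rp")},
+//    }}
+//    _, err := client.SetAccessPolicy(ctx, acl, nil)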
+func (client *QueueClient) SetAccessPolicy(ctx context.Context, queueACL []*SignedIdentifier, options *QueueClientSetAccessPolicyOptions) (QueueClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, queueACL, options) + if err != nil { + return QueueClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return QueueClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. +func (client *QueueClient) setAccessPolicyCreateRequest(ctx context.Context, queueACL []*SignedIdentifier, options *QueueClientSetAccessPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + QueueACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + return req, runtime.MarshalAsXML(req, wrapper{QueueACL: &queueACL}) +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. +func (client *QueueClient) setAccessPolicyHandleResponse(resp *http.Response) (QueueClientSetAccessPolicyResponse, error) { + result := QueueClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - QueueClientSetMetadataOptions contains the optional parameters for the QueueClient.SetMetadata method. +func (client *QueueClient) SetMetadata(ctx context.Context, options *QueueClientSetMetadataOptions) (QueueClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options) + if err != nil { + return QueueClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return QueueClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return QueueClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. 
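+// Like Create, SetMetadata sends each entry as an x-ms-meta-<key> header; per
+// the documented service semantics, a call that carries no metadata headers
+// clears whatever metadata is currently set on the queue.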
+func (client *QueueClient) setMetadataCreateRequest(ctx context.Context, options *QueueClientSetMetadataOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *QueueClient) setMetadataHandleResponse(resp *http.Response) (QueueClientSetMetadataResponse, error) { + result := QueueClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return QueueClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_response_types.go new file mode 100644 index 00000000000..a5262814996 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_response_types.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import "time" + +// MessageIDClientDeleteResponse contains the response from method MessageIDClient.Delete. +type MessageIDClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// MessageIDClientUpdateResponse contains the response from method MessageIDClient.Update. +type MessageIDClientUpdateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // PopReceipt contains the information returned from the x-ms-popreceipt header response. + PopReceipt *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TimeNextVisible contains the information returned from the x-ms-time-next-visible header response. + TimeNextVisible *time.Time + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// MessagesClientClearResponse contains the response from method MessagesClient.Clear. +type MessagesClientClearResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// MessagesClientDequeueResponse contains the response from method MessagesClient.Dequeue. +type MessagesClientDequeueResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // The object returned when calling Get Messages on a Queue + Messages []*DequeuedMessage `xml:"QueueMessage"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// MessagesClientEnqueueResponse contains the response from method MessagesClient.Enqueue. +type MessagesClientEnqueueResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // The object returned when calling Put Message on a Queue + Messages []*EnqueuedMessage `xml:"QueueMessage"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// MessagesClientPeekResponse contains the response from method MessagesClient.Peek. +type MessagesClientPeekResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // The object returned when calling Peek Messages on a Queue + Messages []*PeekedMessage `xml:"QueueMessage"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// QueueClientCreateResponse contains the response from method QueueClient.Create. +type QueueClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// QueueClientDeleteResponse contains the response from method QueueClient.Delete. +type QueueClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// QueueClientGetAccessPolicyResponse contains the response from method QueueClient.GetAccessPolicy. +type QueueClientGetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// QueueClientGetPropertiesResponse contains the response from method QueueClient.GetProperties. +type QueueClientGetPropertiesResponse struct { + // ApproximateMessagesCount contains the information returned from the x-ms-approximate-messages-count header response. + ApproximateMessagesCount *int32 + + // Date contains the information returned from the Date header response. + Date *time.Time + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// QueueClientSetAccessPolicyResponse contains the response from method QueueClient.SetAccessPolicy. +type QueueClientSetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// QueueClientSetMetadataResponse contains the response from method QueueClient.SetMetadata. +type QueueClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + StorageServiceProperties + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { + StorageServiceStats + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientListQueuesSegmentResponse contains the response from method ServiceClient.NewListQueuesSegmentPager. +type ServiceClientListQueuesSegmentResponse struct { + ListQueuesSegmentResponse + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string `xml:"Version"` +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_service_client.go new file mode 100644 index 00000000000..cc1e4c713d7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_service_client.go @@ -0,0 +1,276 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, queue or message that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// GetProperties - gets the properties of a storage account's Queue service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
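+// Service-level requests address the account root rather than a queue path,
+// which is why restype=service is set alongside comp=properties. Illustrative
+// usage (the account name and pipeline pl are placeholders):
+//
+//    svc := NewServiceClient("https://<account>.queue.core.windows.net/", pl)
+//    props, err := svc.GetProperties(ctx, nil)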
+func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) { + result := ServiceClientGetPropertiesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return ServiceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Queue service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage +// account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + } + return client.getStatisticsHandleResponse(resp) +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. 
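+// GetStatistics lifts the Date header and also unmarshals the XML body into
+// the embedded StorageServiceStats. An illustrative read of the replication
+// status (values are "live", "bootstrap" or "unavailable"):
+//
+//    stats, err := svc.GetStatistics(ctx, nil)
+//    if err == nil && stats.GeoReplication != nil && stats.GeoReplication.Status != nil {
+//        fmt.Println(*stats.GeoReplication.Status)
+//    }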
+func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) { + result := ServiceClientGetStatisticsResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return ServiceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// NewListQueuesSegmentPager - The List Queues Segment operation returns a list of the queues under the specified account +// +// Generated from API version 2018-03-28 +// - options - ServiceClientListQueuesSegmentOptions contains the optional parameters for the ServiceClient.NewListQueuesSegmentPager +// method. +// +// ListQueuesSegmentCreateRequest creates the ListQueuesFlatSegment ListQueuesSegment. +func (client *ServiceClient) ListQueuesSegmentCreateRequest(ctx context.Context, options *ServiceClientListQueuesSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listQueuesSegmentHandleResponse handles the ListQueuesSegment response. +func (client *ServiceClient) ListQueuesSegmentHandleResponse(resp *http.Response) (ServiceClientListQueuesSegmentResponse, error) { + result := ServiceClientListQueuesSegmentResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ServiceClientListQueuesSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListQueuesSegmentResponse); err != nil { + return ServiceClientListQueuesSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's Queue service endpoint, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2018-03-28 +// - storageServiceProperties - The StorageService properties. 
+// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ServiceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. +func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2018-03-28"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, runtime.MarshalAsXML(req, storageServiceProperties) +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) { + result := ServiceClientSetPropertiesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc1123.go new file mode 100644 index 00000000000..4b4d51aa399 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc3339.go new file mode 100644 index 00000000000..1ce9d621164 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_time_rfc3339.go @@ -0,0 +1,59 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_xml_helper.go new file mode 100644 index 00000000000..144ea18e1ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated/zz_xml_helper.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "encoding/xml" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. +func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared/shared.go new file mode 100644 index 00000000000..e634fec6fbc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared/shared.go @@ -0,0 +1,146 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "fmt" + "hash/crc64" + "net" + "strings" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" +) + +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + +// CopyOptions returns a zero-value T if opts is nil. +// If opts is not nil, a copy is made and its address returned. +func CopyOptions[T any](opts *T) *T { + if opts == nil { + return new(T) + } + cp := *opts + return &cp +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. 
For example 'DefaultEndpointsProtocol=https;AccountName=<accountName>;" +
+	"AccountKey=<accountKey>;EndpointSuffix=core.windows.net'")
+
+type ParsedConnectionString struct {
+	ServiceURL  string
+	AccountName string
+	AccountKey  string
+}
+
+func ParseConnectionString(connectionString string) (ParsedConnectionString, error) {
+	const (
+		defaultScheme = "https"
+		defaultSuffix = "core.windows.net"
+	)
+
+	connStrMap := make(map[string]string)
+	connectionString = strings.TrimRight(connectionString, ";")
+
+	splitString := strings.Split(connectionString, ";")
+	if len(splitString) == 0 {
+		return ParsedConnectionString{}, errConnectionString
+	}
+	for _, stringPart := range splitString {
+		parts := strings.SplitN(stringPart, "=", 2)
+		if len(parts) != 2 {
+			return ParsedConnectionString{}, errConnectionString
+		}
+		connStrMap[parts[0]] = parts[1]
+	}
+
+	accountName, ok := connStrMap["AccountName"]
+	if !ok {
+		return ParsedConnectionString{}, errors.New("connection string missing AccountName")
+	}
+
+	accountKey, ok := connStrMap["AccountKey"]
+	if !ok {
+		sharedAccessSignature, ok := connStrMap["SharedAccessSignature"]
+		if !ok {
+			return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature")
+		}
+		return ParsedConnectionString{
+			ServiceURL: fmt.Sprintf("%v://%v.queue.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature),
+		}, nil
+	}
+
+	protocol, ok := connStrMap["DefaultEndpointsProtocol"]
+	if !ok {
+		protocol = defaultScheme
+	}
+
+	suffix, ok := connStrMap["EndpointSuffix"]
+	if !ok {
+		suffix = defaultSuffix
+	}
+
+	if queueEndpoint, ok := connStrMap["QueueEndpoint"]; ok {
+		return ParsedConnectionString{
+			ServiceURL:  queueEndpoint,
+			AccountName: accountName,
+			AccountKey:  accountKey,
+		}, nil
+	}
+
+	return ParsedConnectionString{
+		ServiceURL:  fmt.Sprintf("%v://%v.queue.%v", protocol, accountName, suffix),
+		AccountName: accountName,
+		AccountKey:  accountKey,
+	}, nil
+}
+
+func GetClientOptions[T any](o *T) *T {
+	if o == nil {
+		return new(T)
+	}
+	return o
+}
+
+// IsIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/queue/...
+// As with a URL's Host property, host may be either host or host:port.
+func IsIPEndpointStyle(host string) bool {
+	if host == "" {
+		return false
+	}
+	if h, _, err := net.SplitHostPort(host); err == nil {
+		host = h
+	}
+	// For IPv6, SplitHostPort can fail because it cannot find a port.
+	// In this case, eliminate the '[' and ']' in the URL.
+	// For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
+	if host[0] == '[' && host[len(host)-1] == ']' {
+		host = host[1 : len(host)-1]
+	}
+	return net.ParseIP(host) != nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/models.go
new file mode 100644
index 00000000000..6d50c7b289a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/models.go
@@ -0,0 +1,470 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package azqueue + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// URLParts object represents the components that make up an Azure Storage Queue URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. +func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} + +// ================================================================ + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CORSRule = generated.CORSRule + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication = generated.GeoReplication + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy = generated.RetentionPolicy + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for queues +type Metrics = generated.Metrics + +// Logging - Azure Analytics Logging settings. +type Logging = generated.Logging + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats = generated.StorageServiceStats + +// SignedIdentifier - signed identifier +type SignedIdentifier = generated.SignedIdentifier + +// EnqueuedMessage - enqueued message +type EnqueuedMessage = generated.EnqueuedMessage + +// DequeuedMessage - dequeued message +type DequeuedMessage = generated.DequeuedMessage + +// PeekedMessage - peeked message +type PeekedMessage = generated.PeekedMessage + +// ListQueuesSegmentResponse - response segment +type ListQueuesSegmentResponse = generated.ListQueuesSegmentResponse + +// Queue - queue item +type Queue = generated.Queue + +// AccessPolicy - An Access policy +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a queue's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. 
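+//
+// A hedged usage sketch (assuming the exported type carries Read, Add, Update
+// and Process flags, matching the queue ACL permission letters r, a, u, p):
+//
+//	perm := azqueue.AccessPolicyPermission{Read: true, Add: true, Update: true, Process: true}
+//	s := perm.String() // "raup" under these assumptions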
+type AccessPolicyPermission = exported.AccessPolicyPermission + +// --------------------------------------------------------------------------------------------------------------------- + +// ListQueuesOptions provides set of configurations for ListQueues operation +type ListQueuesOptions struct { + Include ListQueuesInclude + + // A string value that identifies the portion of the list of queues to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing operation did not return all queues + // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of queues to return. If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only queues whose name begins with the specified prefix. + Prefix *string +} + +// ListQueuesInclude indicates what additional information the service should return with each queue. +type ListQueuesInclude struct { + // Tells the service whether to return metadata for each queue. + Metadata bool +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for ServiceClient.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + CORS []*CORSRule + + // a summary of request statistics grouped by API in hour or minute aggregates for queues + HourMetrics *Metrics + + // Azure Analytics Logging settings. 
+ Logging *Logging + + // a summary of request statistics grouped by API in hour or minute aggregates for queues + MinuteMetrics *Metrics +} + +func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { + if o == nil { + return generated.StorageServiceProperties{}, nil + } + + defaultVersion := to.Ptr[string]("1.0") + defaultAge := to.Ptr[int32](0) + emptyStr := to.Ptr[string]("") + + if o.CORS != nil { + for i := 0; i < len(o.CORS); i++ { + if o.CORS[i].AllowedHeaders == nil { + o.CORS[i].AllowedHeaders = emptyStr + } + if o.CORS[i].ExposedHeaders == nil { + o.CORS[i].ExposedHeaders = emptyStr + } + if o.CORS[i].MaxAgeInSeconds == nil { + o.CORS[i].MaxAgeInSeconds = defaultAge + } + } + } + + if o.HourMetrics != nil { + if o.HourMetrics.Version == nil { + o.HourMetrics.Version = defaultVersion + } + } + + if o.Logging != nil { + if o.Logging.Version == nil { + o.Logging.Version = defaultVersion + } + } + + if o.MinuteMetrics != nil { + if o.MinuteMetrics.Version == nil { + o.MinuteMetrics.Version = defaultVersion + } + + } + + return generated.StorageServiceProperties{ + CORS: o.CORS, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + }, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetServicePropertiesOptions contains the optional parameters for the ServiceClient.GetServiceProperties method. +type GetServicePropertiesOptions struct { + // placeholder for future options +} + +func (o *GetServicePropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetStatisticsOptions provides set of options for ServiceClient.GetStatistics +type GetStatisticsOptions struct { + // placeholder for future options +} + +func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions { + return nil +} + +// -------------------------------------------------QUEUES-------------------------------------------------------------- + +// CreateOptions contains the optional parameters for creating a queue. +type CreateOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the queue. + Metadata map[string]*string +} + +func (o *CreateOptions) format() *generated.QueueClientCreateOptions { + if o == nil { + return nil + } + return &generated.QueueClientCreateOptions{Metadata: o.Metadata} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for deleting a queue. +type DeleteOptions struct { +} + +func (o *DeleteOptions) format() *generated.QueueClientDeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the QueueClient.SetMetadata method. 
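+//
+// A hedged usage sketch (queueClient and the metadata key/value are
+// illustrative; to.Ptr is the azcore/to helper imported in this file):
+//
+//	opts := azqueue.SetMetadataOptions{Metadata: map[string]*string{"env": to.Ptr("dev")}}
+//	_, err := queueClient.SetMetadata(context.TODO(), &opts)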
+type SetMetadataOptions struct { + Metadata map[string]*string +} + +func (o *SetMetadataOptions) format() *generated.QueueClientSetMetadataOptions { + if o == nil { + return nil + } + + return &generated.QueueClientSetMetadataOptions{Metadata: o.Metadata} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the QueueClient.GetAccessPolicy method. +type GetAccessPolicyOptions struct { +} + +func (o *GetAccessPolicyOptions) format() *generated.QueueClientGetAccessPolicyOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions provides set of configurations for QueueClient.SetAccessPolicy operation +type SetAccessPolicyOptions struct { + QueueACL []*SignedIdentifier +} + +func (o *SetAccessPolicyOptions) format() (*generated.QueueClientSetAccessPolicyOptions, []*SignedIdentifier, error) { + if o == nil { + return nil, nil, nil + } + if o.QueueACL != nil { + for _, c := range o.QueueACL { + err := formatTime(c) + if err != nil { + return nil, nil, err + } + } + } + return &generated.QueueClientSetAccessPolicyOptions{}, o.QueueACL, nil +} + +func formatTime(c *SignedIdentifier) error { + if c.AccessPolicy == nil { + return nil + } + + if c.AccessPolicy.Start != nil { + st, err := time.Parse(time.RFC3339, c.AccessPolicy.Start.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Start = &st + } + if c.AccessPolicy.Expiry != nil { + et, err := time.Parse(time.RFC3339, c.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + c.AccessPolicy.Expiry = &et + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetQueuePropertiesOptions contains the optional parameters for the QueueClient.GetProperties method. +type GetQueuePropertiesOptions struct { +} + +func (o *GetQueuePropertiesOptions) format() *generated.QueueClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// EnqueueMessageOptions contains the optional parameters for the QueueClient.EnqueueMessage method. +type EnqueueMessageOptions struct { + // Specifies the time-to-live interval for the message, in seconds. + // The time-to-live may be any positive number or -1 for infinity. + // If this parameter is omitted, the default time-to-live is 7 days. + TimeToLive *int32 + // If not specified, the default value is 0. + // Specifies the new visibility timeout value, in seconds, relative to server time. + // The value must be larger than or equal to 0, and cannot be larger than 7 days. + // The visibility timeout of a message cannot be set to a value later than the expiry time. + // VisibilityTimeout should be set to a value smaller than the time-to-live value. 
+ VisibilityTimeout *int32 +} + +func (o *EnqueueMessageOptions) format() *generated.MessagesClientEnqueueOptions { + if o == nil { + return nil + } + + return &generated.MessagesClientEnqueueOptions{MessageTimeToLive: o.TimeToLive, + Visibilitytimeout: o.VisibilityTimeout} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DequeueMessageOptions contains the optional parameters for the QueueClient.DequeueMessage method. +type DequeueMessageOptions struct { + // If not specified, the default value is 0. Specifies the new visibility timeout value, + // in seconds, relative to server time. The value must be larger than or equal to 0, and cannot be + // larger than 7 days. The visibility timeout of a message cannot be + // set to a value later than the expiry time. VisibilityTimeout + // should be set to a value smaller than the time-to-live value. + VisibilityTimeout *int32 +} + +func (o *DequeueMessageOptions) format() *generated.MessagesClientDequeueOptions { + numberOfMessages := int32(1) + if o == nil { + return &generated.MessagesClientDequeueOptions{NumberOfMessages: &numberOfMessages} + } + + return &generated.MessagesClientDequeueOptions{NumberOfMessages: &numberOfMessages, + Visibilitytimeout: o.VisibilityTimeout} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DequeueMessagesOptions contains the optional parameters for the QueueClient.DequeueMessages method. +type DequeueMessagesOptions struct { + // Optional. A nonzero integer value that specifies the number of messages to retrieve from the queue, + // up to a maximum of 32. If fewer messages are visible, the visible messages are returned. + // By default, a single message is retrieved from the queue with this operation. + NumberOfMessages *int32 + // If not specified, the default value is 30. Specifies the + // new visibility timeout value, in seconds, relative to server time. + // The value must be larger than or equal to 1, and cannot be + // larger than 7 days. The visibility timeout of a message cannot be + // set to a value later than the expiry time. VisibilityTimeout + // should be set to a value smaller than the time-to-live value. + VisibilityTimeout *int32 +} + +func (o *DequeueMessagesOptions) format() *generated.MessagesClientDequeueOptions { + if o == nil { + return nil + } + + return &generated.MessagesClientDequeueOptions{NumberOfMessages: o.NumberOfMessages, + Visibilitytimeout: o.VisibilityTimeout} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UpdateMessageOptions contains the optional parameters for the QueueClient.UpdateMessage method. +type UpdateMessageOptions struct { + VisibilityTimeout *int32 +} + +func (o *UpdateMessageOptions) format() *generated.MessageIDClientUpdateOptions { + defaultVT := to.Ptr(int32(0)) + if o == nil { + return &generated.MessageIDClientUpdateOptions{Visibilitytimeout: defaultVT} + } + if o.VisibilityTimeout == nil { + o.VisibilityTimeout = defaultVT + } + return &generated.MessageIDClientUpdateOptions{Visibilitytimeout: o.VisibilityTimeout} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteMessageOptions contains the optional parameters for the QueueClient.DeleteMessage method. 
+type DeleteMessageOptions struct { +} + +func (o *DeleteMessageOptions) format() *generated.MessageIDClientDeleteOptions { + if o == nil { + return nil + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PeekMessageOptions contains the optional parameters for the QueueClient.PeekMessage method. +type PeekMessageOptions struct { +} + +func (o *PeekMessageOptions) format() *generated.MessagesClientPeekOptions { + numberOfMessages := int32(1) + return &generated.MessagesClientPeekOptions{NumberOfMessages: &numberOfMessages} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PeekMessagesOptions contains the optional parameters for the QueueClient.PeekMessages method. +type PeekMessagesOptions struct { + NumberOfMessages *int32 +} + +func (o *PeekMessagesOptions) format() *generated.MessagesClientPeekOptions { + if o == nil { + return nil + } + + return &generated.MessagesClientPeekOptions{NumberOfMessages: o.NumberOfMessages} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ClearMessagesOptions contains the optional parameters for the QueueClient.ClearMessages method. +type ClearMessagesOptions struct { +} + +func (o *ClearMessagesOptions) format() *generated.MessagesClientClearOptions { + if o == nil { + return nil + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } + return st +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queue_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queue_client.go new file mode 100644 index 00000000000..d05ba16f273 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queue_client.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azqueue + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas" + "time" +) + +// QueueClient represents a URL to the Azure Queue Storage service allowing you to manipulate queues. 
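+//
+// A hedged end-to-end sketch (connection string, queue name and message body
+// are placeholders; error handling elided):
+//
+//	q, _ := azqueue.NewQueueClientFromConnectionString(connStr, "jobs", nil)
+//	_, _ = q.EnqueueMessage(ctx, "hello", &azqueue.EnqueueMessageOptions{TimeToLive: to.Ptr(int32(3600))})
+//	resp, _ := q.DequeueMessage(ctx, nil)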
+type QueueClient base.CompositeClient[generated.QueueClient, generated.MessagesClient]
+
+func (q *QueueClient) queueClient() *generated.QueueClient {
+	queue, _ := base.InnerClients((*base.CompositeClient[generated.QueueClient, generated.MessagesClient])(q))
+	return queue
+}
+
+func (q *QueueClient) messagesClient() *generated.MessagesClient {
+	_, messages := base.InnerClients((*base.CompositeClient[generated.QueueClient, generated.MessagesClient])(q))
+	return messages
+}
+
+func (q *QueueClient) getMessageIDURL(messageID string) string {
+	return runtime.JoinPaths(q.queueClient().Endpoint(), "messages", messageID)
+}
+
+func (q *QueueClient) sharedKey() *SharedKeyCredential {
+	return base.SharedKeyComposite((*base.CompositeClient[generated.QueueClient, generated.MessagesClient])(q))
+}
+
+// URL returns the URL endpoint used by the QueueClient object.
+func (q *QueueClient) URL() string {
+	return q.queueClient().Endpoint()
+}
+
+// NewQueueClient creates an instance of QueueClient with the specified values.
+// - queueURL - the URL of the queue e.g. https://<account>.queue.core.windows.net/<queue>
+// - cred - an Azure AD credential, typically obtained via the azidentity module
+// - options - client options; pass nil to accept the default values
+func NewQueueClient(queueURL string, cred azcore.TokenCredential, options *ClientOptions) (*QueueClient, error) {
+	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*QueueClient)(base.NewQueueClient(queueURL, pl, nil)), nil
+}
+
+// NewQueueClientWithNoCredential creates an instance of QueueClient with the specified values.
+// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
+// - queueURL - the URL of the queue e.g. https://<account>.queue.core.windows.net/<queue>?<sas token>
+// - options - client options; pass nil to accept the default values
+func NewQueueClientWithNoCredential(queueURL string, options *ClientOptions) (*QueueClient, error) {
+	conOptions := shared.GetClientOptions(options)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*QueueClient)(base.NewQueueClient(queueURL, pl, nil)), nil
+}
+
+// NewQueueClientWithSharedKeyCredential creates an instance of QueueClient with the specified values.
+// - queueURL - the URL of the queue e.g. https://<account>.queue.core.windows.net/<queue>
+// - cred - a SharedKeyCredential created with the matching storage account and access key
+// - options - client options; pass nil to accept the default values
+func NewQueueClientWithSharedKeyCredential(queueURL string, cred *SharedKeyCredential, options *ClientOptions) (*QueueClient, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*QueueClient)(base.NewQueueClient(queueURL, pl, cred)), nil
+}
+
+// NewQueueClientFromConnectionString creates an instance of QueueClient with the specified values.
+// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewQueueClientFromConnectionString(connectionString string, queueName string, options *ClientOptions) (*QueueClient, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, queueName) + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewQueueClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewQueueClientWithNoCredential(parsed.ServiceURL, options) +} + +// Create creates a new queue within a storage account. If a queue with the specified name already exists, and +// the existing metadata is identical to the metadata that's specified on the Create Queue request, +// status code 204 (No Content) is returned. If the existing metadata doesn't match the metadata provided with the Create Queue request, +// the operation fails and status code 409 (Conflict) is returned. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-queue4. +func (q *QueueClient) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + opts := options.format() + resp, err := q.queueClient().Create(ctx, opts) + return resp, err +} + +// Delete deletes the specified queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-queue3. +func (q *QueueClient) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts := options.format() + resp, err := q.queueClient().Delete(ctx, opts) + return resp, err +} + +// SetMetadata sets the metadata for the queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-queue-metadata. +func (q *QueueClient) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts := options.format() + resp, err := q.queueClient().SetMetadata(ctx, opts) + return resp, err +} + +// GetProperties gets properties including metadata of a queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-queue-metadata. +func (q *QueueClient) GetProperties(ctx context.Context, options *GetQueuePropertiesOptions) (GetQueuePropertiesResponse, error) { + opts := options.format() + resp, err := q.queueClient().GetProperties(ctx, opts) + return resp, err +} + +// GetAccessPolicy returns the queue's access policy. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-queue-acl. +func (q *QueueClient) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + options := o.format() + resp, err := q.queueClient().GetAccessPolicy(ctx, options) + return resp, err +} + +// SetAccessPolicy sets the queue's permissions. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-queue-acl. 
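+//
+// A hedged sketch (assuming SignedIdentifier carries ID/AccessPolicy and
+// AccessPolicy carries Start/Expiry/Permission, consistent with the formatTime
+// helper in models.go; values illustrative):
+//
+//	id, perm := "policy-1", "raup"
+//	expiry := time.Now().Add(48 * time.Hour)
+//	_, err := q.SetAccessPolicy(ctx, &azqueue.SetAccessPolicyOptions{
+//		QueueACL: []*azqueue.SignedIdentifier{{ID: &id, AccessPolicy: &azqueue.AccessPolicy{Expiry: &expiry, Permission: &perm}}},
+//	})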
+func (q *QueueClient) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + opts, acl, err := o.format() + if err != nil { + return SetAccessPolicyResponse{}, err + } + resp, err := q.queueClient().SetAccessPolicy(ctx, acl, opts) + return resp, err +} + +// EnqueueMessage adds a message to the queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-message. +func (q *QueueClient) EnqueueMessage(ctx context.Context, content string, o *EnqueueMessageOptions) (EnqueueMessagesResponse, error) { + opts := o.format() + message := generated.QueueMessage{MessageText: &content} + resp, err := q.messagesClient().Enqueue(ctx, message, opts) + return resp, err +} + +// DequeueMessage removes one message from the queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-messages. +func (q *QueueClient) DequeueMessage(ctx context.Context, o *DequeueMessageOptions) (DequeueMessagesResponse, error) { + opts := o.format() + resp, err := q.messagesClient().Dequeue(ctx, opts) + return resp, err +} + +// UpdateMessage updates a message from the queue with the given popReceipt. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/update-message. +func (q *QueueClient) UpdateMessage(ctx context.Context, messageID string, popReceipt string, content string, o *UpdateMessageOptions) (UpdateMessageResponse, error) { + opts := o.format() + message := generated.QueueMessage{MessageText: &content} + messageClient := generated.NewMessageIDClient(q.getMessageIDURL(messageID), q.queueClient().Pipeline()) + resp, err := messageClient.Update(ctx, popReceipt, message, opts) + return resp, err +} + +// DeleteMessage deletes message from queue with the given popReceipt. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-message2. +func (q *QueueClient) DeleteMessage(ctx context.Context, messageID string, popReceipt string, o *DeleteMessageOptions) (DeleteMessageResponse, error) { + opts := o.format() + messageClient := generated.NewMessageIDClient(q.getMessageIDURL(messageID), q.queueClient().Pipeline()) + resp, err := messageClient.Delete(ctx, popReceipt, opts) + return resp, err +} + +// PeekMessage peeks the first message from the queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/peek-messages. +func (q *QueueClient) PeekMessage(ctx context.Context, o *PeekMessageOptions) (PeekMessagesResponse, error) { + opts := o.format() + resp, err := q.messagesClient().Peek(ctx, opts) + return resp, err +} + +// DequeueMessages removes one or more messages from the queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-messages. +func (q *QueueClient) DequeueMessages(ctx context.Context, o *DequeueMessagesOptions) (DequeueMessagesResponse, error) { + opts := o.format() + resp, err := q.messagesClient().Dequeue(ctx, opts) + return resp, err +} + +// PeekMessages peeks one or more messages from the queue +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/peek-messages. +func (q *QueueClient) PeekMessages(ctx context.Context, o *PeekMessagesOptions) (PeekMessagesResponse, error) { + opts := o.format() + resp, err := q.messagesClient().Peek(ctx, opts) + return resp, err +} + +// ClearMessages deletes all messages from the queue. 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/clear-messages. +func (q *QueueClient) ClearMessages(ctx context.Context, o *ClearMessagesOptions) (ClearMessagesResponse, error) { + opts := o.format() + resp, err := q.messagesClient().Clear(ctx, opts) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +// This validity can be checked with CanGetAccountSASToken(). +func (q *QueueClient) GetSASURL(permissions sas.QueuePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if q.sharedKey() == nil { + return "", queueerror.MissingSharedKeyCredential + } + + st := o.format() + urlParts, err := ParseURL(q.URL()) + if err != nil { + return "", err + } + qps, err := sas.QueueSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + StartTime: st, + ExpiryTime: expiry.UTC(), + Permissions: permissions.String(), + QueueName: urlParts.QueueName, + }.SignWithSharedKey(q.sharedKey()) + if err != nil { + return "", err + } + + endpoint := q.URL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror/error_codes.go new file mode 100644 index 00000000000..3527c036f7f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror/error_codes.go @@ -0,0 +1,99 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package queueerror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
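+//
+// A hedged usage sketch (err comes from any queue operation; the codes are the
+// constants defined below):
+//
+//	_, err := queueClient.Create(context.TODO(), nil)
+//	if queueerror.HasCode(err, queueerror.QueueAlreadyExists, queueerror.QueueBeingDeleted) {
+//		// retry or treat as success, as appropriate
+//	}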
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMarker Code = "InvalidMarker" + InvalidMetadata Code = "InvalidMetadata" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + InvalidResourceName Code = "InvalidResourceName" + InvalidURI Code = "InvalidUri" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + MD5Mismatch Code = "Md5Mismatch" + MessageNotFound Code = "MessageNotFound" + MessageTooLarge Code = "MessageTooLarge" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + PopReceiptMismatch Code = "PopReceiptMismatch" + QueueAlreadyExists Code = "QueueAlreadyExists" + QueueBeingDeleted Code = "QueueBeingDeleted" + QueueDisabled Code = "QueueDisabled" + QueueNotEmpty Code = "QueueNotEmpty" + QueueNotFound Code = "QueueNotFound" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + ServerBusy Code = "ServerBusy" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. 
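+	// A hedged sketch of testing for it (q built without a shared key credential):
+	//
+	//	if _, err := q.GetSASURL(perms, expiry, nil); errors.Is(err, queueerror.MissingSharedKeyCredential) {
+	//		// no shared key available; a SAS cannot be minted from this client
+	//	}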
+ MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/responses.go new file mode 100644 index 00000000000..70231c7ec9f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/responses.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package azqueue + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" +) + +// CreateQueueResponse contains the response from method queue.ServiceClient.Create. +type CreateQueueResponse = generated.QueueClientCreateResponse + +// DeleteQueueResponse contains the response from method queue.ServiceClient.Delete +type DeleteQueueResponse = generated.QueueClientDeleteResponse + +// ListQueuesResponse contains the response from method ServiceClient.ListQueuesSegment. +type ListQueuesResponse = generated.ServiceClientListQueuesSegmentResponse + +// GetServicePropertiesResponse contains the response from method ServiceClient.GetServiceProperties. +type GetServicePropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// GetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse + +//------------------------------------------ QUEUES ------------------------------------------------------------------- + +// CreateResponse contains the response from method QueueClient.Create. +type CreateResponse = generated.QueueClientCreateResponse + +// DeleteResponse contains the response from method QueueClient.Delete. +type DeleteResponse = generated.QueueClientDeleteResponse + +// SetMetadataResponse contains the response from method QueueClient.SetMetadata. +type SetMetadataResponse = generated.QueueClientSetMetadataResponse + +// GetAccessPolicyResponse contains the response from method QueueClient.GetAccessPolicy. +type GetAccessPolicyResponse = generated.QueueClientGetAccessPolicyResponse + +// SetAccessPolicyResponse contains the response from method QueueClient.SetAccessPolicy. +type SetAccessPolicyResponse = generated.QueueClientSetAccessPolicyResponse + +// GetQueuePropertiesResponse contains the response from method QueueClient.GetProperties. +type GetQueuePropertiesResponse = generated.QueueClientGetPropertiesResponse + +// EnqueueMessagesResponse contains the response from method QueueClient.EnqueueMessage. +type EnqueueMessagesResponse = generated.MessagesClientEnqueueResponse + +// DequeueMessagesResponse contains the response from method QueueClient.DequeueMessage or QueueClient.DequeueMessages. +type DequeueMessagesResponse = generated.MessagesClientDequeueResponse + +// UpdateMessageResponse contains the response from method QueueClient.UpdateMessage. +type UpdateMessageResponse = generated.MessageIDClientUpdateResponse + +// DeleteMessageResponse contains the response from method QueueClient.DeleteMessage. 
+type DeleteMessageResponse = generated.MessageIDClientDeleteResponse + +// PeekMessagesResponse contains the response from method QueueClient.PeekMessage or QueueClient.PeekMessages. +type PeekMessagesResponse = generated.MessagesClientPeekResponse + +// ClearMessagesResponse contains the response from method QueueClient.ClearMessages. +type ClearMessagesResponse = generated.MessagesClientClearResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/account.go new file mode 100644 index 00000000000..8c4c8074db2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/account.go @@ -0,0 +1,223 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. 
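+//
+// A hedged sketch of minting an account SAS (cred is a *SharedKeyCredential and
+// serviceURL a placeholder; expiry and permissions illustrative; Encode is
+// defined on QueryParameters):
+//
+//	perms := sas.AccountPermissions{Read: true, List: true}
+//	res := sas.AccountResourceTypes{Service: true, Container: true, Object: true}
+//	qp, err := sas.AccountSignatureValues{
+//		Protocol:      sas.ProtocolHTTPS,
+//		ExpiryTime:    time.Now().Add(time.Hour).UTC(),
+//		Permissions:   perms.String(),
+//		ResourceTypes: res.String(),
+//	}.SignWithSharedKey(cred)
+//	sasURL := serviceURL + "?" + qp.Encode()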
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + + startTime, expiryTime := formatTimesForSigning(v.StartTime, v.ExpiryTime) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + "q", + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: "q", + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. +type AccountPermissions struct { + Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, Tag, FilterByTags, SetImmutabilityPolicy bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSignatureValues' Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.PermanentDelete { + buffer.WriteRune('y') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + if p.SetImmutabilityPolicy { + buffer.WriteRune('i') + } + return buffer.String() +} + +// Parse initializes the AccountPermissions' fields from a string. 
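+// As a quick sanity check (sketch): parseAccountPermissions("rl") yields
+// AccountPermissions{Read: true, List: true}, and any letter outside the set
+// handled below returns an error.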
+func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'y': + p.PermanentDelete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + case 'i': + p.SetImmutabilityPolicy = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. +func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/query_params.go new file mode 100644 index 00000000000..6f64f6a243d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/query_params.go @@ -0,0 +1,504 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// timeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const ( + timeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 +) + +const ( + // Version is the default version encoded in the SAS token. + Version = "2020-02-10" +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", timeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
+
+// formatTimesForSigning converts the given start and expiry time.Time values to strings suitable for a
+// SAS's StartTime or ExpiryTime fields. Returns "" for a value that IsZero().
+func formatTimesForSigning(startTime, expiryTime time.Time) (string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatTimeWithDefaultFormat(&expiryTime)
+	}
+	return ss, se
+}
+
+// formatTimeWithDefaultFormat formats a time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+	return formatTime(t, timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime formats a time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string against each accepted layout.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
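Reviewer's note: a small illustration, not part of the patch, of the fallback parsing above. It would live inside this package (e.g. a test) since parseTime is unexported; fmt is assumed imported:

// Each of these strings matches one of the layouts in timeFormats.
func exampleParseTimeLayouts() {
	for _, s := range []string{
		"2017-07-27T00:00:00.0000000Z", // full seven-digit precision
		"2017-07-27T00:00:00Z",         // timeFormat, the layout used when signing
		"2017-07-27T00:00Z",
		"2017-07-27",
	} {
		if t, layout, err := parseTime(s); err == nil {
			fmt.Printf("%q matched layout %q -> %v\n", s, layout, t.UTC())
		}
	}
}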
+
+// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewQueryParameters(). You produce the
+// URL-encoded form of the components by calling Encode().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type QueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+	version              string    `param:"sv"`
+	services             string    `param:"ss"`
+	resourceTypes        string    `param:"srt"`
+	protocol             Protocol  `param:"spr"`
+	startTime            time.Time `param:"st"`
+	expiryTime           time.Time `param:"se"`
+	snapshotTime         time.Time `param:"snapshot"`
+	ipRange              IPRange   `param:"sip"`
+	identifier           string    `param:"si"`
+	resource             string    `param:"sr"`
+	permissions          string    `param:"sp"`
+	signature            string    `param:"sig"`
+	cacheControl         string    `param:"rscc"`
+	contentDisposition   string    `param:"rscd"`
+	contentEncoding      string    `param:"rsce"`
+	contentLanguage      string    `param:"rscl"`
+	contentType          string    `param:"rsct"`
+	signedOID            string    `param:"skoid"`
+	signedTID            string    `param:"sktid"`
+	signedStart          time.Time `param:"skt"`
+	signedService        string    `param:"sks"`
+	signedExpiry         time.Time `param:"ske"`
+	signedVersion        string    `param:"skv"`
+	signedDirectoryDepth string    `param:"sdd"`
+	authorizedObjectID   string    `param:"saoid"`
+	unauthorizedObjectID string    `param:"suoid"`
+	correlationID        string    `param:"scid"`
+	// private members used for startTime and expiryTime formatting.
+	stTimeFormat string
+	seTimeFormat string
+}
+
+// AuthorizedObjectID returns authorizedObjectID
+func (p *QueryParameters) AuthorizedObjectID() string {
+	return p.authorizedObjectID
+}
+
+// UnauthorizedObjectID returns unauthorizedObjectID
+func (p *QueryParameters) UnauthorizedObjectID() string {
+	return p.unauthorizedObjectID
+}
+
+// SignedCorrelationID returns signedCorrelationID
+func (p *QueryParameters) SignedCorrelationID() string {
+	return p.correlationID
+}
+
+// SignedOID returns signedOID
+func (p *QueryParameters) SignedOID() string {
+	return p.signedOID
+}
+
+// SignedTID returns signedTID
+func (p *QueryParameters) SignedTID() string {
+	return p.signedTID
+}
+
+// SignedStart returns signedStart
+func (p *QueryParameters) SignedStart() time.Time {
+	return p.signedStart
+}
+
+// SignedExpiry returns signedExpiry
+func (p *QueryParameters) SignedExpiry() time.Time {
+	return p.signedExpiry
+}
+
+// SignedService returns signedService
+func (p *QueryParameters) SignedService() string {
+	return p.signedService
+}
+
+// SignedVersion returns signedVersion
+func (p *QueryParameters) SignedVersion() string {
+	return p.signedVersion
+}
+
+// SnapshotTime returns snapshotTime
+func (p *QueryParameters) SnapshotTime() time.Time {
+	return p.snapshotTime
+}
+
+// Version returns version
+func (p *QueryParameters) Version() string {
+	return p.version
+}
+
+// Services returns services
+func (p *QueryParameters) Services() string {
+	return p.services
+}
+
+// ResourceTypes returns resourceTypes
+func (p *QueryParameters) ResourceTypes() string {
+	return p.resourceTypes
+}
+
+// Protocol returns protocol
+func (p *QueryParameters) Protocol() Protocol {
+	return p.protocol
+}
+
+// StartTime returns startTime
+func (p *QueryParameters) StartTime() time.Time {
+	return p.startTime
+}
+
+// ExpiryTime returns expiryTime
+func (p *QueryParameters) ExpiryTime() time.Time {
+	return p.expiryTime
+}
+
+// IPRange returns ipRange
+func (p *QueryParameters) IPRange() IPRange {
+	return p.ipRange
+}
+
+// Identifier returns identifier
+func (p *QueryParameters) Identifier() string {
+	return p.identifier
+}
+
+// Resource returns resource
+func (p *QueryParameters) Resource() string {
+	return p.resource
+}
+
+// Permissions returns permissions
+func (p *QueryParameters) Permissions() string {
+	return p.permissions
+}
+
+// Signature returns signature
+func (p *QueryParameters) Signature() string {
+	return p.signature
+}
+
+// CacheControl returns cacheControl
+func (p *QueryParameters) CacheControl() string {
+	return p.cacheControl
+}
+
+// ContentDisposition returns contentDisposition
+func (p *QueryParameters) ContentDisposition() string {
+	return p.contentDisposition
+}
+
+// ContentEncoding returns contentEncoding
+func (p *QueryParameters) ContentEncoding() string {
+	return p.contentEncoding
+}
+
+// ContentLanguage returns contentLanguage
+func (p *QueryParameters) ContentLanguage() string {
+	return p.contentLanguage
+}
+
+// ContentType returns contentType
+func (p *QueryParameters) ContentType() string {
+	return p.contentType
+}
+
+// SignedDirectoryDepth returns signedDirectoryDepth
+func (p *QueryParameters) SignedDirectoryDepth() string {
+	return p.signedDirectoryDepth
+}
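Reviewer's note: a sketch of the intended round trip through this type, with hypothetical parameter values. NewQueryParameters is defined later in this file, and fmt is assumed imported:

// Parse the query portion of a SAS URL into typed fields, then re-encode it.
func exampleQueryParametersRoundTrip() {
	raw, _ := url.ParseQuery("sv=2020-02-10&spr=https&se=2023-01-02T15:04:05Z&sp=raup&sig=abc123")
	qp := NewQueryParameters(raw)
	fmt.Println(qp.Version())     // "2020-02-10"
	fmt.Println(qp.Permissions()) // "raup"
	fmt.Println(qp.Encode())      // URL-encoded form, sorted by key
}

+// Encode encodes the SAS query parameters into URL encoded form sorted by key.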
+func (p *QueryParameters) Encode() string { + v := url.Values{} + + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOID != "" { + v.Add("skoid", p.signedOID) + v.Add("sktid", p.signedTID) + v.Add("skt", p.signedStart.Format(timeFormat)) + v.Add("ske", p.signedExpiry.Format(timeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.authorizedObjectID != "" { + v.Add("saoid", p.authorizedObjectID) + } + if p.unauthorizedObjectID != "" { + v.Add("suoid", p.unauthorizedObjectID) + } + if p.correlationID != "" { + v.Add("scid", p.correlationID) + } + + return v.Encode() +} + +// NewQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. If a key is unrecognized, it is ignored +func NewQueryParameters(values url.Values) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOID = val + case "sktid": + p.signedTID = val + case "skt": + p.signedStart, _ = time.Parse(timeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(timeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.authorizedObjectID = val + case "suoid": + p.unauthorizedObjectID = val + case "scid": + p.correlationID = val + default: + continue // query param didn't get recognized + } + } + return p +} + +// newQueryParameters creates and initializes a QueryParameters object based on the +// query parameter map's passed-in values. 
If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters {
+	p := QueryParameters{}
+	for k, v := range values {
+		val := v[0]
+		isSASKey := true
+		switch strings.ToLower(k) {
+		case "sv":
+			p.version = val
+		case "ss":
+			p.services = val
+		case "srt":
+			p.resourceTypes = val
+		case "spr":
+			p.protocol = Protocol(val)
+		case "st":
+			p.startTime, p.stTimeFormat, _ = parseTime(val)
+		case "se":
+			p.expiryTime, p.seTimeFormat, _ = parseTime(val)
+		case "sip":
+			dashIndex := strings.Index(val, "-")
+			if dashIndex == -1 {
+				p.ipRange.Start = net.ParseIP(val)
+			} else {
+				p.ipRange.Start = net.ParseIP(val[:dashIndex])
+				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+			}
+		case "si":
+			p.identifier = val
+		case "sr":
+			p.resource = val
+		case "sp":
+			p.permissions = val
+		case "sig":
+			p.signature = val
+		case "rscc":
+			p.cacheControl = val
+		case "rscd":
+			p.contentDisposition = val
+		case "rsce":
+			p.contentEncoding = val
+		case "rscl":
+			p.contentLanguage = val
+		case "rsct":
+			p.contentType = val
+		case "skoid":
+			p.signedOID = val
+		case "sktid":
+			p.signedTID = val
+		case "skt":
+			p.signedStart, _ = time.Parse(timeFormat, val)
+		case "ske":
+			p.signedExpiry, _ = time.Parse(timeFormat, val)
+		case "sks":
+			p.signedService = val
+		case "skv":
+			p.signedVersion = val
+		case "sdd":
+			p.signedDirectoryDepth = val
+		case "saoid":
+			p.authorizedObjectID = val
+		case "suoid":
+			p.unauthorizedObjectID = val
+		case "scid":
+			p.correlationID = val
+		default:
+			isSASKey = false // We didn't recognize the query parameter
+		}
+		if isSASKey && deleteSASParametersFromValues {
+			delete(values, k)
+		}
+	}
+	return p
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/service.go
new file mode 100644
index 00000000000..2a7477d9501
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/service.go
@@ -0,0 +1,133 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package sas
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported"
+)
+
+// QueueSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage Queue.
+// For more information on creating a service SAS, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+// Delegation SAS is not supported for the Queue service.
+type QueueSignatureValues struct {
+	Version     string    `param:"sv"`  // If not specified, this defaults to Version
+	Protocol    Protocol  `param:"spr"` // See the Protocol* constants
+	StartTime   time.Time `param:"st"`  // Not specified if IsZero
+	ExpiryTime  time.Time `param:"se"`  // Not specified if IsZero
+	Permissions string    `param:"sp"`  // Create by initializing a QueuePermissions and then call String()
+	IPRange     IPRange   `param:"sip"`
+	Identifier  string    `param:"si"`
+	QueueName   string
+}
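Reviewer's note: a short, hypothetical sketch of signing queue-scoped values with the type above. The credential, queue name, and expiry are placeholders; time is already imported in this file:

func exampleQueueSAS(cred *SharedKeyCredential) (string, error) {
	perms := QueuePermissions{Read: true, Process: true}
	qp, err := QueueSignatureValues{
		Protocol:    ProtocolHTTPS,
		ExpiryTime:  time.Now().UTC().Add(4 * time.Hour),
		Permissions: perms.String(), // "rp"
		QueueName:   "myqueue",
	}.SignWithSharedKey(cred)
	if err != nil {
		return "", err
	}
	// Append the signed query parameters to the queue's URL.
	return "https://" + cred.AccountName() + ".queue.core.windows.net/myqueue?" + qp.Encode(), nil
}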
+
+// SignWithSharedKey uses an account's SharedKeyCredential to sign these signature values to produce the proper SAS query parameters.
+func (v QueueSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
+	if v.ExpiryTime.IsZero() || v.Permissions == "" {
+		return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions")
+	}
+
+	// Make sure the permission characters are in the correct order
+	perms, err := parseQueuePermissions(v.Permissions)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+	if v.Version == "" {
+		v.Version = Version
+	}
+	startTime, expiryTime := formatTimesForSigning(v.StartTime, v.ExpiryTime)
+
+	signedIdentifier := v.Identifier
+
+	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	stringToSign := strings.Join([]string{
+		v.Permissions,
+		startTime,
+		expiryTime,
+		getCanonicalName(sharedKeyCredential.AccountName(), v.QueueName),
+		signedIdentifier,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version},
+		"\n")
+
+	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
+	if err != nil {
+		return QueryParameters{}, err
+	}
+
+	p := QueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// getCanonicalName computes the canonical name for a queue resource for SAS signing.
+func getCanonicalName(account string, queueName string) string {
+	elements := []string{"/queue/", account, "/", queueName}
+	return strings.Join(elements, "")
+}
+
+// QueuePermissions type simplifies creating the permissions string for an Azure Storage Queue SAS.
+// Initialize an instance of this type and then call its String method to set QueueSignatureValues' Permissions field.
+type QueuePermissions struct {
+	Read, Add, Update, Process bool
+}
+
+// String produces the SAS permissions string for an Azure Storage Queue.
+// Call this method to set QueueSignatureValues' Permissions field.
+func (p *QueuePermissions) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Update {
+		b.WriteRune('u')
+	}
+	if p.Process {
+		b.WriteRune('p')
+	}
+	return b.String()
+}
+
+// parseQueuePermissions initializes the QueuePermissions' fields from a string.
+func parseQueuePermissions(s string) (QueuePermissions, error) {
+	p := QueuePermissions{}
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		default:
+			return QueuePermissions{}, fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return p, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/url_parts.go
new file mode 100644
index 00000000000..99e0732f9d1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas/url_parts.go
@@ -0,0 +1,98 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package sas
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared"
+)
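Reviewer's note: a hypothetical sketch of the parse/rebuild cycle this file implements. URLParts and ParseURL are defined just below; the URL and SAS values are placeholders, and fmt is assumed imported:

func exampleParseQueueURL() {
	parts, err := ParseURL("https://myaccount.queue.core.windows.net/myqueue?sv=2020-02-10&sig=abc")
	if err != nil {
		return
	}
	fmt.Println(parts.Host)          // "myaccount.queue.core.windows.net"
	fmt.Println(parts.QueueName)     // "myqueue"
	fmt.Println(parts.SAS.Version()) // "2020-02-10"
	fmt.Println(parts.String())      // rebuilds the URL, SAS and unparsed params included
}

+// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.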
+// Ex: "https://10.132.141.33/accountname/queuename" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Queue URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.queue.core.windows.net", "10.132.141.33", "10.132.141.33:80" + IPEndpointStyleInfo IPEndpointStyleInfo + QueueName string // "" if no queue + SAS QueryParameters + UnparsedParams string +} + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related query parameters. +// Any other query parameters remain in the UnparsedParams field. +func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err + } + + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the queue name (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if shared.IsIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no queue name + up.IPEndpointStyleInfo.AccountName = path + path = "" // No queue name present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (queue name) + } + } + up.QueueName = path + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + up.SAS = newQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery +// field contains the SAS and unparsed query parameters. +func (up URLParts) String() string { + path := "" + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate queue name + if up.QueueName != "" { + path += "/" + up.QueueName + } + + rawQuery := up.UnparsedParams + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/service_client.go new file mode 100644 index 00000000000..3782de66221 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/service_client.go @@ -0,0 +1,223 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package azqueue + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas" + "net/http" + "net/url" + "strings" + "time" +) + +// ClientOptions contains the optional parameters when creating a ServiceClient or QueueClient. +type ClientOptions struct { + azcore.ClientOptions +} + +// ServiceClient represents a URL to the Azure Queue Storage service allowing you to manipulate queues. +type ServiceClient base.Client[generated.ServiceClient] + +// NewServiceClient creates an instance of ServiceClient with the specified values. +// - serviceURL - the URL of the storage account e.g. https://.queue.core.windows.net/ +// - cred - an Azure AD credential, typically obtained via the azidentity module +// - options - client options; pass nil to accept the default values +func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*ServiceClient)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewServiceClientWithNoCredential creates an instance of ServiceClient with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://.queue.core.windows.net/? +// - options - client options; pass nil to accept the default values +func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*ServiceClient)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewServiceClientWithSharedKeyCredential creates an instance of ServiceClient with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.queue.core.windows.net/
+//   - cred - a SharedKeyCredential created with the matching storage account and access key
+//   - options - client options; pass nil to accept the default values
+func NewServiceClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) {
+	authPolicy := exported.NewSharedKeyCredPolicy(cred)
+	conOptions := shared.GetClientOptions(options)
+	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+
+	return (*ServiceClient)(base.NewServiceClient(serviceURL, pl, cred)), nil
+}
+
+// NewServiceClientFromConnectionString creates an instance of ServiceClient with the specified values.
+//   - connectionString - a connection string for the desired storage account
+//   - options - client options; pass nil to accept the default values
+func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) {
+	parsed, err := shared.ParseConnectionString(connectionString)
+	if err != nil {
+		return nil, err
+	}
+
+	if parsed.AccountKey != "" && parsed.AccountName != "" {
+		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
+		if err != nil {
+			return nil, err
+		}
+		return NewServiceClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
+	}
+
+	return NewServiceClientWithNoCredential(parsed.ServiceURL, options)
+}
+
+func (s *ServiceClient) generated() *generated.ServiceClient {
+	return base.InnerClient((*base.Client[generated.ServiceClient])(s))
+}
+
+func (s *ServiceClient) sharedKey() *SharedKeyCredential {
+	return base.SharedKey((*base.Client[generated.ServiceClient])(s))
+}
+
+// URL returns the URL endpoint used by the ServiceClient object.
+func (s *ServiceClient) URL() string {
+	return s.generated().Endpoint()
+}
+
+// GetServiceProperties - gets the properties of a storage account's Queue service, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules.
+func (s *ServiceClient) GetServiceProperties(ctx context.Context, o *GetServicePropertiesOptions) (GetServicePropertiesResponse, error) {
+	getPropertiesOptions := o.format()
+	resp, err := s.generated().GetProperties(ctx, getPropertiesOptions)
+	return resp, err
+}
+
+// SetProperties sets the properties of a storage account's Queue service, including Azure Storage Analytics.
+// If an element (e.g. analytics logging) is left unset, the existing settings on the service for that functionality are preserved.
+func (s *ServiceClient) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) {
+	properties, setPropertiesOptions := o.format()
+	resp, err := s.generated().SetProperties(ctx, properties, setPropertiesOptions)
+	return resp, err
+}
+
+// GetStatistics retrieves statistics related to replication for the Queue service.
+func (s *ServiceClient) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) {
+	getStatisticsOptions := o.format()
+	resp, err := s.generated().GetStatistics(ctx, getStatisticsOptions)
+
+	return resp, err
+}
+
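Reviewer's note: a hypothetical sketch of consuming the pager returned below. The client variable is a *ServiceClient built with one of the constructors above; fmt is assumed imported, and the Queues field name on the response page is assumed from the generated models:

func listQueues(ctx context.Context, client *ServiceClient) error {
	pager := client.NewListQueuesPager(&ListQueuesOptions{})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, q := range page.Queues { // assumed field; see the generated list-queues response
			fmt.Println(*q.Name)
		}
	}
	return nil
}

+// NewListQueuesPager returns a pager of the queues under the specified account.
+// Use an empty Marker to start enumeration from the beginning. Queue names are returned in lexicographic order.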
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-queues1. +func (s *ServiceClient) NewListQueuesPager(o *ListQueuesOptions) *runtime.Pager[ListQueuesResponse] { + listOptions := generated.ServiceClientListQueuesSegmentOptions{} + if o != nil { + if o.Include.Metadata { + listOptions.Include = append(listOptions.Include, "metadata") + } + listOptions.Marker = o.Marker + listOptions.Maxresults = o.MaxResults + listOptions.Prefix = o.Prefix + } + return runtime.NewPager(runtime.PagingHandler[ListQueuesResponse]{ + More: func(page ListQueuesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListQueuesResponse) (ListQueuesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListQueuesSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = s.generated().ListQueuesSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListQueuesResponse{}, err + } + resp, err := s.generated().Pipeline().Do(req) + if err != nil { + return ListQueuesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListQueuesResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListQueuesSegmentHandleResponse(resp) + }, + }) +} + +// NewQueueClient creates a new QueueClient object by concatenating queueName to the end of +// this Client's URL. The new QueueClient uses the same request policy pipeline as the Client. +func (s *ServiceClient) NewQueueClient(queueName string) *QueueClient { + queueName = url.PathEscape(queueName) + queueURL := runtime.JoinPaths(s.URL(), queueName) + return (*QueueClient)(base.NewQueueClient(queueURL, s.generated().Pipeline(), s.sharedKey())) +} + +// CreateQueue creates a new queue within a storage account. If a queue with the same name already exists, the operation fails. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-queue4. +func (s *ServiceClient) CreateQueue(ctx context.Context, queueName string, options *CreateOptions) (CreateResponse, error) { + queueName = url.PathEscape(queueName) + queueURL := runtime.JoinPaths(s.URL(), queueName) + qC := (*QueueClient)(base.NewQueueClient(queueURL, s.generated().Pipeline(), s.sharedKey())) + return qC.Create(ctx, options) +} + +// DeleteQueue deletes the specified queue. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-queue3. +func (s *ServiceClient) DeleteQueue(ctx context.Context, queueName string, options *DeleteOptions) (DeleteResponse, error) { + queueName = url.PathEscape(queueName) + queueURL := runtime.JoinPaths(s.URL(), queueName) + qC := (*QueueClient)(base.NewQueueClient(queueURL, s.generated().Pipeline(), s.sharedKey())) + return qC.Delete(ctx, options) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +// This validity can be checked with CanGetAccountSASToken(). 
+func (s *ServiceClient) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", queueerror.MissingSharedKeyCredential + } + st := o.format() + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + Permissions: permissions.String(), + ResourceTypes: resources.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/test-resources.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/test-resources.json new file mode 100644 index 00000000000..5912abdc0a7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/test-resources.json @@ -0,0 +1,516 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." + } + } + }, + "variables": { + "mgmtApiVersion": "2022-09-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + 
"resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": 
"[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "deleteRetentionPolicy": { + "allowPermanentDelete": true, + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + "name": 
"accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": 
"[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/CHANGELOG.md deleted file mode 100644 index 52911e4cc5e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/_meta.json 
b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/_meta.json deleted file mode 100644 index 668f0aa105c..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/eventhub/resource-manager/readme.md", - "tag": "package-2017-04", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2017-04 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/eventhub/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/client.go deleted file mode 100644 index 0e48a44d637..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details. -// -// Package eventhub implements the Azure ARM Eventhub service API version 2017-04-01. -// -// Azure Event Hubs client -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Eventhub - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Eventhub. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/consumergroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/consumergroups.go deleted file mode 100644 index 980ef5978c2..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/consumergroups.go +++ /dev/null @@ -1,476 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ConsumerGroupsClient is the azure Event Hubs client -type ConsumerGroupsClient struct { - BaseClient -} - -// NewConsumerGroupsClient creates an instance of the ConsumerGroupsClient client. -func NewConsumerGroupsClient(subscriptionID string) ConsumerGroupsClient { - return NewConsumerGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewConsumerGroupsClientWithBaseURI creates an instance of the ConsumerGroupsClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewConsumerGroupsClientWithBaseURI(baseURI string, subscriptionID string) ConsumerGroupsClient { - return ConsumerGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates an Event Hubs consumer group as a nested resource within a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// consumerGroupName - the consumer group name -// parameters - parameters supplied to create or update a consumer group resource. 
-func (client ConsumerGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, parameters ConsumerGroup) (result ConsumerGroup, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: consumerGroupName, - Constraints: []validation.Constraint{{Target: "consumerGroupName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "consumerGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.ConsumerGroupsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ConsumerGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string, parameters ConsumerGroup) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "consumerGroupName": autorest.Encode("path", consumerGroupName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ConsumerGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ConsumerGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ConsumerGroup, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a consumer group from the specified Event Hub and resource group. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// consumerGroupName - the consumer group name -func (client ConsumerGroupsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: consumerGroupName, - Constraints: []validation.Constraint{{Target: "consumerGroupName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "consumerGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.ConsumerGroupsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client ConsumerGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "consumerGroupName": autorest.Encode("path", consumerGroupName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ConsumerGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ConsumerGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a description for the specified consumer group. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// consumerGroupName - the consumer group name -func (client ConsumerGroupsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string) (result ConsumerGroup, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: consumerGroupName, - Constraints: []validation.Constraint{{Target: "consumerGroupName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "consumerGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.ConsumerGroupsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, namespaceName, eventHubName, consumerGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ConsumerGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, consumerGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "consumerGroupName": autorest.Encode("path", consumerGroupName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups/{consumerGroupName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ConsumerGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ConsumerGroupsClient) GetResponder(resp *http.Response) (result ConsumerGroup, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByEventHub gets all the consumer groups in a Namespace. An empty feed is returned if no consumer group exists in -// the Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// skip - skip is only used if a previous operation returned a partial result. If a previous response contains -// a nextLink element, the value of the nextLink element will include a skip parameter that specifies a -// starting point to use for subsequent calls. -// top - may be used to limit the number of results to the most recent N usageDetails. 
-func (client ConsumerGroupsClient) ListByEventHub(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, skip *int32, top *int32) (result ConsumerGroupListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupsClient.ListByEventHub") - defer func() { - sc := -1 - if result.cglr.Response.Response != nil { - sc = result.cglr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, - {Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}}}, - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, - {Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("eventhub.ConsumerGroupsClient", "ListByEventHub", err.Error()) - } - - result.fn = client.listByEventHubNextResults - req, err := client.ListByEventHubPreparer(ctx, resourceGroupName, namespaceName, eventHubName, skip, top) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "ListByEventHub", nil, "Failure preparing request") - return - } - - resp, err := client.ListByEventHubSender(req) - if err != nil { - result.cglr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "ListByEventHub", resp, "Failure sending request") - return - } - - result.cglr, err = client.ListByEventHubResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "ListByEventHub", resp, "Failure responding to request") - return - } - if result.cglr.hasNextLink() && result.cglr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByEventHubPreparer prepares the ListByEventHub request. 
-func (client ConsumerGroupsClient) ListByEventHubPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, skip *int32, top *int32) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/consumergroups", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByEventHubSender sends the ListByEventHub request. The method will close the -// http.Response Body if it receives an error. -func (client ConsumerGroupsClient) ListByEventHubSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByEventHubResponder handles the response to the ListByEventHub request. The method always -// closes the http.Response Body. -func (client ConsumerGroupsClient) ListByEventHubResponder(resp *http.Response) (result ConsumerGroupListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByEventHubNextResults retrieves the next set of results, if any. -func (client ConsumerGroupsClient) listByEventHubNextResults(ctx context.Context, lastResults ConsumerGroupListResult) (result ConsumerGroupListResult, err error) { - req, err := lastResults.consumerGroupListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "listByEventHubNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByEventHubSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "listByEventHubNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByEventHubResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.ConsumerGroupsClient", "listByEventHubNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByEventHubComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client ConsumerGroupsClient) ListByEventHubComplete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, skip *int32, top *int32) (result ConsumerGroupListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupsClient.ListByEventHub") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByEventHub(ctx, resourceGroupName, namespaceName, eventHubName, skip, top) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/disasterrecoveryconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/disasterrecoveryconfigs.go deleted file mode 100644 index cc97059e981..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/disasterrecoveryconfigs.go +++ /dev/null @@ -1,1031 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// DisasterRecoveryConfigsClient is the azure Event Hubs client -type DisasterRecoveryConfigsClient struct { - BaseClient -} - -// NewDisasterRecoveryConfigsClient creates an instance of the DisasterRecoveryConfigsClient client. -func NewDisasterRecoveryConfigsClient(subscriptionID string) DisasterRecoveryConfigsClient { - return NewDisasterRecoveryConfigsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDisasterRecoveryConfigsClientWithBaseURI creates an instance of the DisasterRecoveryConfigsClient client using a -// custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, -// Azure stack). -func NewDisasterRecoveryConfigsClientWithBaseURI(baseURI string, subscriptionID string) DisasterRecoveryConfigsClient { - return DisasterRecoveryConfigsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// BreakPairing this operation disables the Disaster Recovery and stops replicating changes from primary to secondary -// namespaces -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -func (client DisasterRecoveryConfigsClient) BreakPairing(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.BreakPairing") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "BreakPairing", err.Error()) - } - - req, err := client.BreakPairingPreparer(ctx, resourceGroupName, namespaceName, alias) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "BreakPairing", nil, "Failure preparing request") - return - } - - resp, err := client.BreakPairingSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "BreakPairing", resp, "Failure sending request") - return - } - - result, err = client.BreakPairingResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "BreakPairing", resp, "Failure responding to request") - return - } - - return -} - -// BreakPairingPreparer prepares the BreakPairing request. -func (client DisasterRecoveryConfigsClient) BreakPairingPreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/breakPairing", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BreakPairingSender sends the BreakPairing request. The method will close the -// http.Response Body if it receives an error. 
-func (client DisasterRecoveryConfigsClient) BreakPairingSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// BreakPairingResponder handles the response to the BreakPairing request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) BreakPairingResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// CheckNameAvailability check the give Namespace name availability. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// parameters - parameters to check availability of the given Alias name -func (client DisasterRecoveryConfigsClient) CheckNameAvailability(ctx context.Context, resourceGroupName string, namespaceName string, parameters CheckNameAvailabilityParameter) (result CheckNameAvailabilityResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.CheckNameAvailability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "CheckNameAvailability", err.Error()) - } - - req, err := client.CheckNameAvailabilityPreparer(ctx, resourceGroupName, namespaceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CheckNameAvailability", resp, "Failure responding to request") - return - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. 
-func (client DisasterRecoveryConfigsClient) CheckNameAvailabilityPreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters CheckNameAvailabilityParameter) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/checkNameAvailability", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdate creates or updates a new Alias(Disaster Recovery configuration) -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name
-// alias - the Disaster Recovery configuration name
-// parameters - parameters required to create an Alias(Disaster Recovery configuration)
-func (client DisasterRecoveryConfigsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, alias string, parameters ArmDisasterRecovery) (result ArmDisasterRecovery, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.CreateOrUpdate")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: alias,
-			Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "CreateOrUpdate", err.Error())
-	}
-
-	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, namespaceName, alias, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CreateOrUpdate", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.CreateOrUpdateSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CreateOrUpdate", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.CreateOrUpdateResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "CreateOrUpdate", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
-func (client DisasterRecoveryConfigsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string, parameters ArmDisasterRecovery) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"alias":             autorest.Encode("path", alias),
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
-// http.Response Body if it receives an error.
-func (client DisasterRecoveryConfigsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
-// closes the http.Response Body.
-func (client DisasterRecoveryConfigsClient) CreateOrUpdateResponder(resp *http.Response) (result ArmDisasterRecovery, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// Delete deletes an Alias(Disaster Recovery configuration)
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// alias - the Disaster Recovery configuration name
-func (client DisasterRecoveryConfigsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.Delete")
-		defer func() {
-			sc := -1
-			if result.Response != nil {
-				sc = result.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: alias,
-			Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "Delete", err.Error())
-	}
-
-	req, err := client.DeletePreparer(ctx, resourceGroupName, namespaceName, alias)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Delete", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.DeleteSender(req)
-	if err != nil {
-		result.Response = resp
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Delete", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.DeleteResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Delete", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client DisasterRecoveryConfigsClient) DeletePreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// FailOver invokes GEO DR failover and reconfigure the alias to point to the secondary namespace -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -func (client DisasterRecoveryConfigsClient) FailOver(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.FailOver") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "FailOver", err.Error()) - } - - req, err := client.FailOverPreparer(ctx, resourceGroupName, namespaceName, alias) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "FailOver", nil, "Failure preparing request") - return - } - - resp, err := client.FailOverSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "FailOver", resp, "Failure sending request") - return - } - - result, err = client.FailOverResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "FailOver", resp, "Failure responding to request") - return - } - - return -} - -// FailOverPreparer prepares the FailOver request. -func (client DisasterRecoveryConfigsClient) FailOverPreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/failover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// FailOverSender sends the FailOver request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) FailOverSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// FailOverResponder handles the response to the FailOver request. The method always -// closes the http.Response Body. 
-func (client DisasterRecoveryConfigsClient) FailOverResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get retrieves Alias(Disaster Recovery configuration) for primary or secondary namespace -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -func (client DisasterRecoveryConfigsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result ArmDisasterRecovery, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, namespaceName, alias) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client DisasterRecoveryConfigsClient) GetPreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) GetResponder(resp *http.Response) (result ArmDisasterRecovery, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAuthorizationRule gets an AuthorizationRule for a Namespace by rule name. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -// authorizationRuleName - the authorization rule name. 
-func (client DisasterRecoveryConfigsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result AuthorizationRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.GetAuthorizationRule") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "GetAuthorizationRule", err.Error()) - } - - req, err := client.GetAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, alias, authorizationRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "GetAuthorizationRule", nil, "Failure preparing request") - return - } - - resp, err := client.GetAuthorizationRuleSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "GetAuthorizationRule", resp, "Failure sending request") - return - } - - result, err = client.GetAuthorizationRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "GetAuthorizationRule", resp, "Failure responding to request") - return - } - - return -} - -// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
-func (client DisasterRecoveryConfigsClient) GetAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) GetAuthorizationRuleResponder(resp *http.Response) (result AuthorizationRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets all Alias(Disaster Recovery configurations) -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -func (client DisasterRecoveryConfigsClient) List(ctx context.Context, resourceGroupName string, namespaceName string) (result ArmDisasterRecoveryListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.List") - defer func() { - sc := -1 - if result.adrlr.Response.Response != nil { - sc = result.adrlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "List", err.Error()) - } - - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, resourceGroupName, namespaceName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.adrlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "List", resp, "Failure sending request") - return - } - - result.adrlr, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "List", resp, "Failure responding to request") - return - } - if result.adrlr.hasNextLink() && result.adrlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client DisasterRecoveryConfigsClient) ListPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client DisasterRecoveryConfigsClient) ListResponder(resp *http.Response) (result ArmDisasterRecoveryListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. -func (client DisasterRecoveryConfigsClient) listNextResults(ctx context.Context, lastResults ArmDisasterRecoveryListResult) (result ArmDisasterRecoveryListResult, err error) { - req, err := lastResults.armDisasterRecoveryListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client DisasterRecoveryConfigsClient) ListComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result ArmDisasterRecoveryListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, resourceGroupName, namespaceName) - return -} - -// ListAuthorizationRules gets a list of authorization rules for a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -func (client DisasterRecoveryConfigsClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result AuthorizationRuleListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListAuthorizationRules") - defer func() { - sc := -1 - if result.arlr.Response.Response != nil { - sc = result.arlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "ListAuthorizationRules", err.Error()) - } - - result.fn = client.listAuthorizationRulesNextResults - req, err := client.ListAuthorizationRulesPreparer(ctx, resourceGroupName, namespaceName, alias) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListAuthorizationRules", nil, "Failure preparing request") - return - } - - resp, err := client.ListAuthorizationRulesSender(req) - if err != nil { - result.arlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListAuthorizationRules", resp, "Failure sending request") - return - } - - result.arlr, err = client.ListAuthorizationRulesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListAuthorizationRules", resp, "Failure responding to request") - return - } - if result.arlr.hasNextLink() && result.arlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. 
-func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesPreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesResponder(resp *http.Response) (result AuthorizationRuleListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listAuthorizationRulesNextResults retrieves the next set of results, if any. -func (client DisasterRecoveryConfigsClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults AuthorizationRuleListResult) (result AuthorizationRuleListResult, err error) { - req, err := lastResults.authorizationRuleListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListAuthorizationRulesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listAuthorizationRulesNextResults", resp, "Failure sending next results request") - } - result, err = client.ListAuthorizationRulesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "listAuthorizationRulesNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required. 
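The ListAuthorizationRulesComplete variant documented above wraps the page type in an iterator so callers never touch nextLink handling themselves. A sketch of the consuming loop, assuming ctx is a context.Context and client is a DisasterRecoveryConfigsClient whose Authorizer has been set (for instance via go-autorest's auth.NewAuthorizerFromEnvironment); the resource names are placeholders:

    iter, err := client.ListAuthorizationRulesComplete(ctx, "my-rg", "my-namespace", "my-alias")
    for ; err == nil && iter.NotDone(); err = iter.NextWithContext(ctx) {
        rule := iter.Value() // one AuthorizationRule per step; page boundaries are crossed transparently
        fmt.Println(*rule.Name)
    }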
-func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result AuthorizationRuleListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListAuthorizationRules") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName, alias) - return -} - -// ListKeys gets the primary and secondary connection strings for the Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// alias - the Disaster Recovery configuration name -// authorizationRuleName - the authorization rule name. -func (client DisasterRecoveryConfigsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result AccessKeys, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListKeys") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: alias, - Constraints: []validation.Constraint{{Target: "alias", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "alias", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.DisasterRecoveryConfigsClient", "ListKeys", err.Error()) - } - - req, err := client.ListKeysPreparer(ctx, resourceGroupName, namespaceName, alias, authorizationRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListKeys", nil, "Failure preparing request") - return - } - - resp, err := client.ListKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListKeys", resp, "Failure sending request") - return - } - - result, err = client.ListKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.DisasterRecoveryConfigsClient", "ListKeys", resp, "Failure responding to request") - return - } - - return -} - -// ListKeysPreparer prepares the ListKeys request. 
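Every operation in this file decomposes into the generated Preparer/Sender/Responder triad, and the pieces can be exercised in isolation. A sketch that only prepares the ListKeys request shown below and inspects the outgoing URL, with placeholder names (the rule name included) and the same client/ctx assumptions as above; nothing is actually sent:

    req, err := client.ListKeysPreparer(ctx, "my-rg", "my-namespace", "my-alias", "RootManageSharedAccessKey")
    if err == nil {
        // POST .../disasterRecoveryConfigs/my-alias/authorizationRules/
        //      RootManageSharedAccessKey/listKeys?api-version=2017-04-01
        fmt.Println(req.Method, req.URL.String())
    }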
-func (client DisasterRecoveryConfigsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "alias": autorest.Encode("path", alias), - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}/listKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListKeysSender sends the ListKeys request. The method will close the -// http.Response Body if it receives an error. -func (client DisasterRecoveryConfigsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListKeysResponder handles the response to the ListKeys request. The method always -// closes the http.Response Body. -func (client DisasterRecoveryConfigsClient) ListKeysResponder(resp *http.Response) (result AccessKeys, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/enums.go deleted file mode 100644 index 2cf5242e385..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/enums.go +++ /dev/null @@ -1,198 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// AccessRights enumerates the values for access rights. -type AccessRights string - -const ( - // Listen ... - Listen AccessRights = "Listen" - // Manage ... - Manage AccessRights = "Manage" - // SendEnumValue ... - SendEnumValue AccessRights = "Send" -) - -// PossibleAccessRightsValues returns an array of possible values for the AccessRights const type. -func PossibleAccessRightsValues() []AccessRights { - return []AccessRights{Listen, Manage, SendEnumValue} -} - -// DefaultAction enumerates the values for default action. -type DefaultAction string - -const ( - // Allow ... - Allow DefaultAction = "Allow" - // Deny ... - Deny DefaultAction = "Deny" -) - -// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type. 
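These Possible*Values helpers mainly let callers enumerate the string-backed enum types; note that SendEnumValue is the Go identifier for the wire value "Send", presumably renamed to avoid a name collision. A runnable sketch:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
    )

    func main() {
        for _, r := range eventhub.PossibleAccessRightsValues() {
            fmt.Println(string(r)) // prints Listen, Manage, Send
        }
    }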
-func PossibleDefaultActionValues() []DefaultAction { - return []DefaultAction{Allow, Deny} -} - -// EncodingCaptureDescription enumerates the values for encoding capture description. -type EncodingCaptureDescription string - -const ( - // Avro ... - Avro EncodingCaptureDescription = "Avro" - // AvroDeflate ... - AvroDeflate EncodingCaptureDescription = "AvroDeflate" -) - -// PossibleEncodingCaptureDescriptionValues returns an array of possible values for the EncodingCaptureDescription const type. -func PossibleEncodingCaptureDescriptionValues() []EncodingCaptureDescription { - return []EncodingCaptureDescription{Avro, AvroDeflate} -} - -// EntityStatus enumerates the values for entity status. -type EntityStatus string - -const ( - // Active ... - Active EntityStatus = "Active" - // Creating ... - Creating EntityStatus = "Creating" - // Deleting ... - Deleting EntityStatus = "Deleting" - // Disabled ... - Disabled EntityStatus = "Disabled" - // ReceiveDisabled ... - ReceiveDisabled EntityStatus = "ReceiveDisabled" - // Renaming ... - Renaming EntityStatus = "Renaming" - // Restoring ... - Restoring EntityStatus = "Restoring" - // SendDisabled ... - SendDisabled EntityStatus = "SendDisabled" - // Unknown ... - Unknown EntityStatus = "Unknown" -) - -// PossibleEntityStatusValues returns an array of possible values for the EntityStatus const type. -func PossibleEntityStatusValues() []EntityStatus { - return []EntityStatus{Active, Creating, Deleting, Disabled, ReceiveDisabled, Renaming, Restoring, SendDisabled, Unknown} -} - -// KeyType enumerates the values for key type. -type KeyType string - -const ( - // PrimaryKey ... - PrimaryKey KeyType = "PrimaryKey" - // SecondaryKey ... - SecondaryKey KeyType = "SecondaryKey" -) - -// PossibleKeyTypeValues returns an array of possible values for the KeyType const type. -func PossibleKeyTypeValues() []KeyType { - return []KeyType{PrimaryKey, SecondaryKey} -} - -// NetworkRuleIPAction enumerates the values for network rule ip action. -type NetworkRuleIPAction string - -const ( - // NetworkRuleIPActionAllow ... - NetworkRuleIPActionAllow NetworkRuleIPAction = "Allow" -) - -// PossibleNetworkRuleIPActionValues returns an array of possible values for the NetworkRuleIPAction const type. -func PossibleNetworkRuleIPActionValues() []NetworkRuleIPAction { - return []NetworkRuleIPAction{NetworkRuleIPActionAllow} -} - -// ProvisioningStateDR enumerates the values for provisioning state dr. -type ProvisioningStateDR string - -const ( - // Accepted ... - Accepted ProvisioningStateDR = "Accepted" - // Failed ... - Failed ProvisioningStateDR = "Failed" - // Succeeded ... - Succeeded ProvisioningStateDR = "Succeeded" -) - -// PossibleProvisioningStateDRValues returns an array of possible values for the ProvisioningStateDR const type. -func PossibleProvisioningStateDRValues() []ProvisioningStateDR { - return []ProvisioningStateDR{Accepted, Failed, Succeeded} -} - -// RoleDisasterRecovery enumerates the values for role disaster recovery. -type RoleDisasterRecovery string - -const ( - // Primary ... - Primary RoleDisasterRecovery = "Primary" - // PrimaryNotReplicating ... - PrimaryNotReplicating RoleDisasterRecovery = "PrimaryNotReplicating" - // Secondary ... - Secondary RoleDisasterRecovery = "Secondary" -) - -// PossibleRoleDisasterRecoveryValues returns an array of possible values for the RoleDisasterRecovery const type. 
-func PossibleRoleDisasterRecoveryValues() []RoleDisasterRecovery { - return []RoleDisasterRecovery{Primary, PrimaryNotReplicating, Secondary} -} - -// SkuName enumerates the values for sku name. -type SkuName string - -const ( - // Basic ... - Basic SkuName = "Basic" - // Standard ... - Standard SkuName = "Standard" -) - -// PossibleSkuNameValues returns an array of possible values for the SkuName const type. -func PossibleSkuNameValues() []SkuName { - return []SkuName{Basic, Standard} -} - -// SkuTier enumerates the values for sku tier. -type SkuTier string - -const ( - // SkuTierBasic ... - SkuTierBasic SkuTier = "Basic" - // SkuTierStandard ... - SkuTierStandard SkuTier = "Standard" -) - -// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. -func PossibleSkuTierValues() []SkuTier { - return []SkuTier{SkuTierBasic, SkuTierStandard} -} - -// UnavailableReason enumerates the values for unavailable reason. -type UnavailableReason string - -const ( - // InvalidName ... - InvalidName UnavailableReason = "InvalidName" - // NameInLockdown ... - NameInLockdown UnavailableReason = "NameInLockdown" - // NameInUse ... - NameInUse UnavailableReason = "NameInUse" - // None ... - None UnavailableReason = "None" - // SubscriptionIsDisabled ... - SubscriptionIsDisabled UnavailableReason = "SubscriptionIsDisabled" - // TooManyNamespaceInCurrentSubscription ... - TooManyNamespaceInCurrentSubscription UnavailableReason = "TooManyNamespaceInCurrentSubscription" -) - -// PossibleUnavailableReasonValues returns an array of possible values for the UnavailableReason const type. -func PossibleUnavailableReasonValues() []UnavailableReason { - return []UnavailableReason{InvalidName, NameInLockdown, NameInUse, None, SubscriptionIsDisabled, TooManyNamespaceInCurrentSubscription} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/eventhubs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/eventhubs.go deleted file mode 100644 index efc740b9196..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/eventhubs.go +++ /dev/null @@ -1,1089 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// EventHubsClient is the azure Event Hubs client -type EventHubsClient struct { - BaseClient -} - -// NewEventHubsClient creates an instance of the EventHubsClient client. -func NewEventHubsClient(subscriptionID string) EventHubsClient { - return NewEventHubsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewEventHubsClientWithBaseURI creates an instance of the EventHubsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
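For context while reviewing the removal, this is how the constructors above are typically wired up. A sketch assuming go-autorest's auth.NewAuthorizerFromEnvironment (which reads the AZURE_* environment variables) and its azure environments package; the subscription ID is a placeholder:

    import (
        "log"

        "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
        "github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func newClient() eventhub.EventHubsClient {
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            log.Fatal(err)
        }
        client := eventhub.NewEventHubsClient("<subscription-id>")
        client.Authorizer = authorizer
        return client
    }

For sovereign clouds, NewEventHubsClientWithBaseURI takes the alternate endpoint instead, for example azure.USGovernmentCloud.ResourceManagerEndpoint from github.com/Azure/go-autorest/autorest/azure.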
-func NewEventHubsClientWithBaseURI(baseURI string, subscriptionID string) EventHubsClient { - return EventHubsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a new Event Hub as a nested resource within a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// parameters - parameters supplied to create an Event Hub resource. -func (client EventHubsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, parameters Model) (result Model, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.MessageRetentionInDays", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.MessageRetentionInDays", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.Properties.PartitionCount", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.PartitionCount", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}}}, - {Target: "parameters.Properties.CaptureDescription", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.CaptureDescription.IntervalInSeconds", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.CaptureDescription.IntervalInSeconds", Name: validation.InclusiveMaximum, Rule: int64(900), Chain: nil}, - {Target: "parameters.Properties.CaptureDescription.IntervalInSeconds", Name: validation.InclusiveMinimum, Rule: int64(60), Chain: nil}, - }}, - {Target: "parameters.Properties.CaptureDescription.SizeLimitInBytes", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Properties.CaptureDescription.SizeLimitInBytes", Name: validation.InclusiveMaximum, Rule: int64(524288000), Chain: nil}, - {Target: "parameters.Properties.CaptureDescription.SizeLimitInBytes", Name: validation.InclusiveMinimum, Rule: int64(10485760), Chain: nil}, - }}, - }}, - }}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, namespaceName, eventHubName, 
parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client EventHubsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, parameters Model) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client EventHubsClient) CreateOrUpdateResponder(resp *http.Response) (result Model, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateAuthorizationRule creates or updates an AuthorizationRule for the specified Event Hub. Creation/update -// of the AuthorizationRule will take a few seconds to take effect. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// authorizationRuleName - the authorization rule name. -// parameters - the shared access AuthorizationRule. 
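The constraint chain validated in CreateOrUpdate above implies the following minimum viable payload. A sketch using go-autorest's autorest/to pointer helpers, with field names taken from the constraints themselves and placeholder resource names (client and ctx as above):

    hub, err := client.CreateOrUpdate(ctx, "my-rg", "my-namespace", "my-hub", eventhub.Model{
        Properties: &eventhub.Properties{
            MessageRetentionInDays: to.Int64Ptr(1), // InclusiveMinimum is 1
            PartitionCount:         to.Int64Ptr(4), // InclusiveMinimum is 1
        },
    })

CaptureDescription, if set, must additionally keep IntervalInSeconds within 60-900 and SizeLimitInBytes within 10485760-524288000, per the same validation block.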
-func (client EventHubsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule) (result AuthorizationRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.CreateOrUpdateAuthorizationRule") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AuthorizationRuleProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AuthorizationRuleProperties.Rights", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "CreateOrUpdateAuthorizationRule", err.Error()) - } - - req, err := client.CreateOrUpdateAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
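And the corresponding rule creation: the validation above makes Rights mandatory (Rule: true), with values drawn from the AccessRights enum in the removed enums.go. A sketch with a hypothetical rule name:

    rights := []eventhub.AccessRights{eventhub.Listen, eventhub.SendEnumValue}
    rule, err := client.CreateOrUpdateAuthorizationRule(ctx, "my-rg", "my-namespace", "my-hub", "scaler-rule",
        eventhub.AuthorizationRule{
            AuthorizationRuleProperties: &eventhub.AuthorizationRuleProperties{
                Rights: &rights, // required; nil fails client-side validation
            },
        })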
-func (client EventHubsClient) CreateOrUpdateAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters AuthorizationRule) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always -// closes the http.Response Body. -func (client EventHubsClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result AuthorizationRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes an Event Hub from the specified Namespace and resource group. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// eventHubName - the Event Hub name -func (client EventHubsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "Delete", err.Error()) - } - - req, err := client.DeletePreparer(ctx, resourceGroupName, namespaceName, eventHubName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client EventHubsClient) DeletePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client EventHubsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// DeleteAuthorizationRule deletes an Event Hub AuthorizationRule. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// authorizationRuleName - the authorization rule name. -func (client EventHubsClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.DeleteAuthorizationRule") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "DeleteAuthorizationRule", err.Error()) - } - - req, err := client.DeleteAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "DeleteAuthorizationRule", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteAuthorizationRuleSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "DeleteAuthorizationRule", resp, "Failure sending request") - return - } - - result, err = client.DeleteAuthorizationRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "DeleteAuthorizationRule", resp, "Failure responding to request") - return - } - - return -} - -// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
-func (client EventHubsClient) DeleteAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always -// closes the http.Response Body. -func (client EventHubsClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets an Event Hubs description for the specified Event Hub. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// eventHubName - the Event Hub name -func (client EventHubsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (result Model, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "Get", err.Error()) - } - - req, err := client.GetPreparer(ctx, resourceGroupName, namespaceName, eventHubName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client EventHubsClient) GetPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client EventHubsClient) GetResponder(resp *http.Response) (result Model, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetAuthorizationRule gets an AuthorizationRule for an Event Hub by rule name. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// authorizationRuleName - the authorization rule name. -func (client EventHubsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (result AuthorizationRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.GetAuthorizationRule") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "GetAuthorizationRule", err.Error()) - } - - req, err := client.GetAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "GetAuthorizationRule", nil, "Failure preparing request") - return - } - - resp, err := client.GetAuthorizationRuleSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "GetAuthorizationRule", resp, "Failure sending request") - return - } - - result, err = client.GetAuthorizationRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "GetAuthorizationRule", resp, "Failure responding to request") - return - } - - return -} - -// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
-func (client EventHubsClient) GetAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always -// closes the http.Response Body. -func (client EventHubsClient) GetAuthorizationRuleResponder(resp *http.Response) (result AuthorizationRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListAuthorizationRules gets the authorization rules for an Event Hub. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. 
-// namespaceName - the Namespace name -// eventHubName - the Event Hub name -func (client EventHubsClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (result AuthorizationRuleListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListAuthorizationRules") - defer func() { - sc := -1 - if result.arlr.Response.Response != nil { - sc = result.arlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "ListAuthorizationRules", err.Error()) - } - - result.fn = client.listAuthorizationRulesNextResults - req, err := client.ListAuthorizationRulesPreparer(ctx, resourceGroupName, namespaceName, eventHubName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListAuthorizationRules", nil, "Failure preparing request") - return - } - - resp, err := client.ListAuthorizationRulesSender(req) - if err != nil { - result.arlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListAuthorizationRules", resp, "Failure sending request") - return - } - - result.arlr, err = client.ListAuthorizationRulesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListAuthorizationRules", resp, "Failure responding to request") - return - } - if result.arlr.hasNextLink() && result.arlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. -func (client EventHubsClient) ListAuthorizationRulesPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListAuthorizationRulesSender sends the ListAuthorizationRules request. 
The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always -// closes the http.Response Body. -func (client EventHubsClient) ListAuthorizationRulesResponder(resp *http.Response) (result AuthorizationRuleListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listAuthorizationRulesNextResults retrieves the next set of results, if any. -func (client EventHubsClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults AuthorizationRuleListResult) (result AuthorizationRuleListResult, err error) { - req, err := lastResults.authorizationRuleListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListAuthorizationRulesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listAuthorizationRulesNextResults", resp, "Failure sending next results request") - } - result, err = client.ListAuthorizationRulesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listAuthorizationRulesNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required. -func (client EventHubsClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string) (result AuthorizationRuleListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListAuthorizationRules") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName, eventHubName) - return -} - -// ListByNamespace gets all the Event Hubs in a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// skip - skip is only used if a previous operation returned a partial result. If a previous response contains -// a nextLink element, the value of the nextLink element will include a skip parameter that specifies a -// starting point to use for subsequent calls. -// top - may be used to limit the number of results to the most recent N usageDetails. 
-func (client EventHubsClient) ListByNamespace(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result ListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListByNamespace") - defer func() { - sc := -1 - if result.lr.Response.Response != nil { - sc = result.lr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: skip, - Constraints: []validation.Constraint{{Target: "skip", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "skip", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, - {Target: "skip", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}}}, - {TargetValue: top, - Constraints: []validation.Constraint{{Target: "top", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "top", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil}, - {Target: "top", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "ListByNamespace", err.Error()) - } - - result.fn = client.listByNamespaceNextResults - req, err := client.ListByNamespacePreparer(ctx, resourceGroupName, namespaceName, skip, top) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListByNamespace", nil, "Failure preparing request") - return - } - - resp, err := client.ListByNamespaceSender(req) - if err != nil { - result.lr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListByNamespace", resp, "Failure sending request") - return - } - - result.lr, err = client.ListByNamespaceResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListByNamespace", resp, "Failure responding to request") - return - } - if result.lr.hasNextLink() && result.lr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByNamespacePreparer prepares the ListByNamespace request. 
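Driving the pager above directly looks like this; skip must stay within 0-1000 and top within 1-1000 per the validation block, and to.Int32Ptr is go-autorest's pointer helper (placeholder names, client and ctx as above):

    page, err := client.ListByNamespace(ctx, "my-rg", "my-namespace", nil, to.Int32Ptr(100))
    for err == nil && page.NotDone() {
        for _, hub := range page.Values() {
            fmt.Println(*hub.Name)
        }
        err = page.NextWithContext(ctx) // follows nextLink, carrying the $skip continuation
    }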
-func (client EventHubsClient) ListByNamespacePreparer(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if skip != nil { - queryParameters["$skip"] = autorest.Encode("query", *skip) - } - if top != nil { - queryParameters["$top"] = autorest.Encode("query", *top) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByNamespaceSender sends the ListByNamespace request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) ListByNamespaceSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByNamespaceResponder handles the response to the ListByNamespace request. The method always -// closes the http.Response Body. -func (client EventHubsClient) ListByNamespaceResponder(resp *http.Response) (result ListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByNamespaceNextResults retrieves the next set of results, if any. -func (client EventHubsClient) listByNamespaceNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { - req, err := lastResults.listResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listByNamespaceNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByNamespaceSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listByNamespaceNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByNamespaceResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "listByNamespaceNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByNamespaceComplete enumerates all values, automatically crossing page boundaries as required. 
-func (client EventHubsClient) ListByNamespaceComplete(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result ListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListByNamespace") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByNamespace(ctx, resourceGroupName, namespaceName, skip, top) - return -} - -// ListKeys gets the ACS and SAS connection strings for the Event Hub. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// authorizationRuleName - the authorization rule name. -func (client EventHubsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (result AccessKeys, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListKeys") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "ListKeys", err.Error()) - } - - req, err := client.ListKeysPreparer(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListKeys", nil, "Failure preparing request") - return - } - - resp, err := client.ListKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListKeys", resp, "Failure sending request") - return - } - - result, err = client.ListKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "ListKeys", resp, "Failure responding to request") - return - } - - return -} - -// ListKeysPreparer prepares the ListKeys request. 
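ListByNamespaceComplete, removed above, wraps that pager in a ListResultIterator so callers never handle page boundaries themselves: NextWithContext transparently fetches the next page once the current slice is exhausted. A small consumption sketch, assuming the same hypothetical client setup as before:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
)

// iterateEventHubs drains the auto-paging iterator. Per the contract above,
// a failed NextWithContext leaves the iterator where it was, and Value()
// returns a zero Model once the iterator has advanced past the end.
func iterateEventHubs(ctx context.Context, client eventhub.EventHubsClient) error {
	// nil skip/top defers the paging defaults to the service.
	iter, err := client.ListByNamespaceComplete(ctx, "my-rg", "my-namespace", nil, nil)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		fmt.Printf("%+v\n", iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```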
-func (client EventHubsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}/listKeys", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListKeysSender sends the ListKeys request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListKeysResponder handles the response to the ListKeys request. The method always -// closes the http.Response Body. -func (client EventHubsClient) ListKeysResponder(resp *http.Response) (result AccessKeys, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RegenerateKeys regenerates the ACS and SAS connection strings for the Event Hub. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// eventHubName - the Event Hub name -// authorizationRuleName - the authorization rule name. -// parameters - parameters supplied to regenerate the AuthorizationRule Keys (PrimaryKey/SecondaryKey). 
-func (client EventHubsClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.RegenerateKeys") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: eventHubName, - Constraints: []validation.Constraint{{Target: "eventHubName", Name: validation.MaxLength, Rule: 256, Chain: nil}, - {Target: "eventHubName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.EventHubsClient", "RegenerateKeys", err.Error()) - } - - req, err := client.RegenerateKeysPreparer(ctx, resourceGroupName, namespaceName, eventHubName, authorizationRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "RegenerateKeys", nil, "Failure preparing request") - return - } - - resp, err := client.RegenerateKeysSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "RegenerateKeys", resp, "Failure sending request") - return - } - - result, err = client.RegenerateKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.EventHubsClient", "RegenerateKeys", resp, "Failure responding to request") - return - } - - return -} - -// RegenerateKeysPreparer prepares the RegenerateKeys request. 
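RegenerateKeys invalidates and reissues one of the rule's keys and, like ListKeys, responds with the full AccessKeys payload, so the refreshed connection string can be read straight from the result. A hedged sketch of a rotation helper; the RegenerateAccessKeyParameters fields (KeyType and the PrimaryKey constant) live further down in models.go and are quoted here from the 2017-04-01 API surface, not from this hunk:

```go
import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
)

// rotatePrimaryKey regenerates the primary key of an authorization rule and
// returns the refreshed connection string from the AccessKeys response.
func rotatePrimaryKey(ctx context.Context, client eventhub.EventHubsClient) (string, error) {
	keys, err := client.RegenerateKeys(ctx, "my-rg", "my-namespace", "my-hub",
		"RootManageSharedAccessKey", eventhub.RegenerateAccessKeyParameters{
			KeyType: eventhub.PrimaryKey, // assumed field/constant, see note above
		})
	if err != nil {
		return "", err
	}
	// AccessKeys fields are READ-ONLY pointers; guard before dereferencing.
	if keys.PrimaryConnectionString == nil {
		return "", fmt.Errorf("service returned no primary connection string")
	}
	return *keys.PrimaryConnectionString, nil
}
```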
-func (client EventHubsClient) RegenerateKeysPreparer(ctx context.Context, resourceGroupName string, namespaceName string, eventHubName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "eventHubName": autorest.Encode("path", eventHubName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/eventhubs/{eventHubName}/authorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RegenerateKeysSender sends the RegenerateKeys request. The method will close the -// http.Response Body if it receives an error. -func (client EventHubsClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always -// closes the http.Response Body. -func (client EventHubsClient) RegenerateKeysResponder(resp *http.Response) (result AccessKeys, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go deleted file mode 100644 index b8a3b0f49aa..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/models.go +++ /dev/null @@ -1,2434 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub" - -// AccessKeys namespace/EventHub Connection String -type AccessKeys struct { - autorest.Response `json:"-"` - // PrimaryConnectionString - READ-ONLY; Primary connection string of the created namespace AuthorizationRule. 
- PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` - // SecondaryConnectionString - READ-ONLY; Secondary connection string of the created namespace AuthorizationRule. - SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` - // AliasPrimaryConnectionString - READ-ONLY; Primary connection string of the alias if GEO DR is enabled - AliasPrimaryConnectionString *string `json:"aliasPrimaryConnectionString,omitempty"` - // AliasSecondaryConnectionString - READ-ONLY; Secondary connection string of the alias if GEO DR is enabled - AliasSecondaryConnectionString *string `json:"aliasSecondaryConnectionString,omitempty"` - // PrimaryKey - READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token. - PrimaryKey *string `json:"primaryKey,omitempty"` - // SecondaryKey - READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token. - SecondaryKey *string `json:"secondaryKey,omitempty"` - // KeyName - READ-ONLY; A string that describes the AuthorizationRule. - KeyName *string `json:"keyName,omitempty"` -} - -// MarshalJSON is the custom marshaler for AccessKeys. -func (ak AccessKeys) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ArmDisasterRecovery single item in List or Get Alias(Disaster Recovery configuration) operation -type ArmDisasterRecovery struct { - autorest.Response `json:"-"` - // ArmDisasterRecoveryProperties - Properties required to the Create Or Update Alias(Disaster Recovery configurations) - *ArmDisasterRecoveryProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ArmDisasterRecovery. -func (adr ArmDisasterRecovery) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if adr.ArmDisasterRecoveryProperties != nil { - objectMap["properties"] = adr.ArmDisasterRecoveryProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ArmDisasterRecovery struct. -func (adr *ArmDisasterRecovery) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var armDisasterRecoveryProperties ArmDisasterRecoveryProperties - err = json.Unmarshal(*v, &armDisasterRecoveryProperties) - if err != nil { - return err - } - adr.ArmDisasterRecoveryProperties = &armDisasterRecoveryProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - adr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - adr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - adr.Type = &typeVar - } - } - } - - return nil -} - -// ArmDisasterRecoveryListResult the result of the List Alias(Disaster Recovery configuration) operation. 
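The hand-written MarshalJSON/UnmarshalJSON pairs deleted here exist because ARM nests all mutable state under a "properties" envelope, while the Go types flatten that envelope through an embedded pointer; the marshaler also withholds READ-ONLY fields that ARM would reject on write. The same pattern, reduced to a self-contained toy (type and field names are mine, not the SDK's):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// WidgetProperties mirrors the nested ARM "properties" envelope.
type WidgetProperties struct {
	Color *string `json:"color,omitempty"`
}

// Widget flattens the envelope the way ArmDisasterRecovery does: an embedded
// pointer plus custom (un)marshalers.
type Widget struct {
	*WidgetProperties `json:"properties,omitempty"`
	ID                *string `json:"id,omitempty"` // READ-ONLY: parsed, never sent
}

// MarshalJSON emits only writable fields, dropping READ-ONLY ones.
func (w Widget) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	if w.WidgetProperties != nil {
		m["properties"] = w.WidgetProperties
	}
	return json.Marshal(m)
}

// UnmarshalJSON re-nests the envelope key by key, like the generated code.
func (w *Widget) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		switch k {
		case "properties":
			var p WidgetProperties
			if err := json.Unmarshal(*v, &p); err != nil {
				return err
			}
			w.WidgetProperties = &p
		case "id":
			var id string
			if err := json.Unmarshal(*v, &id); err != nil {
				return err
			}
			w.ID = &id
		}
	}
	return nil
}

func main() {
	var w Widget
	_ = json.Unmarshal([]byte(`{"id":"/sub/x","properties":{"color":"red"}}`), &w)
	out, _ := json.Marshal(w)
	fmt.Println(string(out), *w.ID, *w.Color) // the id comes in but never goes back out
}
```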
-type ArmDisasterRecoveryListResult struct { - autorest.Response `json:"-"` - // Value - List of Alias(Disaster Recovery configurations) - Value *[]ArmDisasterRecovery `json:"value,omitempty"` - // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of Alias(Disaster Recovery configuration) - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for ArmDisasterRecoveryListResult. -func (adrlr ArmDisasterRecoveryListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if adrlr.Value != nil { - objectMap["value"] = adrlr.Value - } - return json.Marshal(objectMap) -} - -// ArmDisasterRecoveryListResultIterator provides access to a complete listing of ArmDisasterRecovery -// values. -type ArmDisasterRecoveryListResultIterator struct { - i int - page ArmDisasterRecoveryListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ArmDisasterRecoveryListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ArmDisasterRecoveryListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ArmDisasterRecoveryListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ArmDisasterRecoveryListResultIterator) Response() ArmDisasterRecoveryListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ArmDisasterRecoveryListResultIterator) Value() ArmDisasterRecovery { - if !iter.page.NotDone() { - return ArmDisasterRecovery{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ArmDisasterRecoveryListResultIterator type. -func NewArmDisasterRecoveryListResultIterator(page ArmDisasterRecoveryListResultPage) ArmDisasterRecoveryListResultIterator { - return ArmDisasterRecoveryListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (adrlr ArmDisasterRecoveryListResult) IsEmpty() bool { - return adrlr.Value == nil || len(*adrlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (adrlr ArmDisasterRecoveryListResult) hasNextLink() bool { - return adrlr.NextLink != nil && len(*adrlr.NextLink) != 0 -} - -// armDisasterRecoveryListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (adrlr ArmDisasterRecoveryListResult) armDisasterRecoveryListResultPreparer(ctx context.Context) (*http.Request, error) { - if !adrlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(adrlr.NextLink))) -} - -// ArmDisasterRecoveryListResultPage contains a page of ArmDisasterRecovery values. -type ArmDisasterRecoveryListResultPage struct { - fn func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error) - adrlr ArmDisasterRecoveryListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ArmDisasterRecoveryListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.adrlr) - if err != nil { - return err - } - page.adrlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ArmDisasterRecoveryListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ArmDisasterRecoveryListResultPage) NotDone() bool { - return !page.adrlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ArmDisasterRecoveryListResultPage) Response() ArmDisasterRecoveryListResult { - return page.adrlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ArmDisasterRecoveryListResultPage) Values() []ArmDisasterRecovery { - if page.adrlr.IsEmpty() { - return nil - } - return *page.adrlr.Value -} - -// Creates a new instance of the ArmDisasterRecoveryListResultPage type. -func NewArmDisasterRecoveryListResultPage(cur ArmDisasterRecoveryListResult, getNextPage func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)) ArmDisasterRecoveryListResultPage { - return ArmDisasterRecoveryListResultPage{ - fn: getNextPage, - adrlr: cur, - } -} - -// ArmDisasterRecoveryProperties properties required to the Create Or Update Alias(Disaster Recovery -// configurations) -type ArmDisasterRecoveryProperties struct { - // ProvisioningState - READ-ONLY; Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or 'Succeeded' or 'Failed'. Possible values include: 'Accepted', 'Succeeded', 'Failed' - ProvisioningState ProvisioningStateDR `json:"provisioningState,omitempty"` - // PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairing - PartnerNamespace *string `json:"partnerNamespace,omitempty"` - // AlternateName - Alternate name specified when alias and namespace names are same. 
- AlternateName *string `json:"alternateName,omitempty"` - // Role - READ-ONLY; role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'. Possible values include: 'Primary', 'PrimaryNotReplicating', 'Secondary' - Role RoleDisasterRecovery `json:"role,omitempty"` - // PendingReplicationOperationsCount - READ-ONLY; Number of entities pending to be replicated. - PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"` -} - -// MarshalJSON is the custom marshaler for ArmDisasterRecoveryProperties. -func (adr ArmDisasterRecoveryProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if adr.PartnerNamespace != nil { - objectMap["partnerNamespace"] = adr.PartnerNamespace - } - if adr.AlternateName != nil { - objectMap["alternateName"] = adr.AlternateName - } - return json.Marshal(objectMap) -} - -// AuthorizationRule single item in a List or Get AuthorizationRule operation -type AuthorizationRule struct { - autorest.Response `json:"-"` - // AuthorizationRuleProperties - Properties supplied to create or update AuthorizationRule - *AuthorizationRuleProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for AuthorizationRule. -func (ar AuthorizationRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ar.AuthorizationRuleProperties != nil { - objectMap["properties"] = ar.AuthorizationRuleProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AuthorizationRule struct. -func (ar *AuthorizationRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var authorizationRuleProperties AuthorizationRuleProperties - err = json.Unmarshal(*v, &authorizationRuleProperties) - if err != nil { - return err - } - ar.AuthorizationRuleProperties = &authorizationRuleProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - ar.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ar.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - ar.Type = &typeVar - } - } - } - - return nil -} - -// AuthorizationRuleListResult the response from the List namespace operation. -type AuthorizationRuleListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List Authorization Rules operation. - Value *[]AuthorizationRule `json:"value,omitempty"` - // NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of Authorization Rules - NextLink *string `json:"nextLink,omitempty"` -} - -// AuthorizationRuleListResultIterator provides access to a complete listing of AuthorizationRule values. -type AuthorizationRuleListResultIterator struct { - i int - page AuthorizationRuleListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *AuthorizationRuleListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AuthorizationRuleListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *AuthorizationRuleListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter AuthorizationRuleListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter AuthorizationRuleListResultIterator) Response() AuthorizationRuleListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter AuthorizationRuleListResultIterator) Value() AuthorizationRule { - if !iter.page.NotDone() { - return AuthorizationRule{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the AuthorizationRuleListResultIterator type. -func NewAuthorizationRuleListResultIterator(page AuthorizationRuleListResultPage) AuthorizationRuleListResultIterator { - return AuthorizationRuleListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (arlr AuthorizationRuleListResult) IsEmpty() bool { - return arlr.Value == nil || len(*arlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (arlr AuthorizationRuleListResult) hasNextLink() bool { - return arlr.NextLink != nil && len(*arlr.NextLink) != 0 -} - -// authorizationRuleListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (arlr AuthorizationRuleListResult) authorizationRuleListResultPreparer(ctx context.Context) (*http.Request, error) { - if !arlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(arlr.NextLink))) -} - -// AuthorizationRuleListResultPage contains a page of AuthorizationRule values. -type AuthorizationRuleListResultPage struct { - fn func(context.Context, AuthorizationRuleListResult) (AuthorizationRuleListResult, error) - arlr AuthorizationRuleListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *AuthorizationRuleListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AuthorizationRuleListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.arlr) - if err != nil { - return err - } - page.arlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *AuthorizationRuleListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page AuthorizationRuleListResultPage) NotDone() bool { - return !page.arlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page AuthorizationRuleListResultPage) Response() AuthorizationRuleListResult { - return page.arlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page AuthorizationRuleListResultPage) Values() []AuthorizationRule { - if page.arlr.IsEmpty() { - return nil - } - return *page.arlr.Value -} - -// Creates a new instance of the AuthorizationRuleListResultPage type. -func NewAuthorizationRuleListResultPage(cur AuthorizationRuleListResult, getNextPage func(context.Context, AuthorizationRuleListResult) (AuthorizationRuleListResult, error)) AuthorizationRuleListResultPage { - return AuthorizationRuleListResultPage{ - fn: getNextPage, - arlr: cur, - } -} - -// AuthorizationRuleProperties properties supplied to create or update AuthorizationRule -type AuthorizationRuleProperties struct { - // Rights - The rights associated with the rule. - Rights *[]AccessRights `json:"rights,omitempty"` -} - -// CaptureDescription properties to configure capture description for eventhub -type CaptureDescription struct { - // Enabled - A value that indicates whether capture description is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Encoding - Enumerates the possible values for the encoding format of capture description. Note: 'AvroDeflate' will be deprecated in New API Version. Possible values include: 'Avro', 'AvroDeflate' - Encoding EncodingCaptureDescription `json:"encoding,omitempty"` - // IntervalInSeconds - The time window allows you to set the frequency with which the capture to Azure Blobs will happen, value should between 60 to 900 seconds - IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"` - // SizeLimitInBytes - The size window defines the amount of data built up in your Event Hub before an capture operation, value should be between 10485760 to 524288000 bytes - SizeLimitInBytes *int32 `json:"sizeLimitInBytes,omitempty"` - // Destination - Properties of Destination where capture will be stored. 
(Storage Account, Blob Names) - Destination *Destination `json:"destination,omitempty"` - // SkipEmptyArchives - A value that indicates whether to Skip Empty Archives - SkipEmptyArchives *bool `json:"skipEmptyArchives,omitempty"` -} - -// CheckNameAvailabilityParameter parameter supplied to check Namespace name availability operation -type CheckNameAvailabilityParameter struct { - // Name - Name to check the namespace name availability - Name *string `json:"name,omitempty"` -} - -// CheckNameAvailabilityResult the Result of the CheckNameAvailability operation -type CheckNameAvailabilityResult struct { - autorest.Response `json:"-"` - // Message - READ-ONLY; The detailed info regarding the reason associated with the Namespace. - Message *string `json:"message,omitempty"` - // NameAvailable - Value indicating Namespace is availability, true if the Namespace is available; otherwise, false. - NameAvailable *bool `json:"nameAvailable,omitempty"` - // Reason - The reason for unavailability of a Namespace. Possible values include: 'None', 'InvalidName', 'SubscriptionIsDisabled', 'NameInUse', 'NameInLockdown', 'TooManyNamespaceInCurrentSubscription' - Reason UnavailableReason `json:"reason,omitempty"` -} - -// MarshalJSON is the custom marshaler for CheckNameAvailabilityResult. -func (cnar CheckNameAvailabilityResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cnar.NameAvailable != nil { - objectMap["nameAvailable"] = cnar.NameAvailable - } - if cnar.Reason != "" { - objectMap["reason"] = cnar.Reason - } - return json.Marshal(objectMap) -} - -// ConsumerGroup single item in List or Get Consumer group operation -type ConsumerGroup struct { - autorest.Response `json:"-"` - // ConsumerGroupProperties - Single item in List or Get Consumer group operation - *ConsumerGroupProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ConsumerGroup. -func (cg ConsumerGroup) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cg.ConsumerGroupProperties != nil { - objectMap["properties"] = cg.ConsumerGroupProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ConsumerGroup struct. -func (cg *ConsumerGroup) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var consumerGroupProperties ConsumerGroupProperties - err = json.Unmarshal(*v, &consumerGroupProperties) - if err != nil { - return err - } - cg.ConsumerGroupProperties = &consumerGroupProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - cg.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - cg.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - cg.Type = &typeVar - } - } - } - - return nil -} - -// ConsumerGroupListResult the result to the List Consumer Group operation. 
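CheckNameAvailabilityResult above shows this write-side filtering at its simplest: the custom marshaler forwards NameAvailable and Reason but silently drops the READ-ONLY Message, so a decode/encode round-trip is lossy by design. A short sketch, assuming the deleted package were still importable:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
)

func main() {
	// The service may populate the READ-ONLY Message field on the way in...
	var r eventhub.CheckNameAvailabilityResult
	_ = json.Unmarshal([]byte(`{"nameAvailable":false,"reason":"NameInUse","message":"taken"}`), &r)

	// ...but MarshalJSON drops it on the way out, since ARM rejects writes
	// to READ-ONLY fields.
	out, _ := json.Marshal(r)
	fmt.Println(string(out)) // {"nameAvailable":false,"reason":"NameInUse"}
}
```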
-type ConsumerGroupListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List Consumer Group operation. - Value *[]ConsumerGroup `json:"value,omitempty"` - // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of Consumer Group - NextLink *string `json:"nextLink,omitempty"` -} - -// ConsumerGroupListResultIterator provides access to a complete listing of ConsumerGroup values. -type ConsumerGroupListResultIterator struct { - i int - page ConsumerGroupListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ConsumerGroupListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *ConsumerGroupListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ConsumerGroupListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ConsumerGroupListResultIterator) Response() ConsumerGroupListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ConsumerGroupListResultIterator) Value() ConsumerGroup { - if !iter.page.NotDone() { - return ConsumerGroup{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ConsumerGroupListResultIterator type. -func NewConsumerGroupListResultIterator(page ConsumerGroupListResultPage) ConsumerGroupListResultIterator { - return ConsumerGroupListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cglr ConsumerGroupListResult) IsEmpty() bool { - return cglr.Value == nil || len(*cglr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (cglr ConsumerGroupListResult) hasNextLink() bool { - return cglr.NextLink != nil && len(*cglr.NextLink) != 0 -} - -// consumerGroupListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cglr ConsumerGroupListResult) consumerGroupListResultPreparer(ctx context.Context) (*http.Request, error) { - if !cglr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cglr.NextLink))) -} - -// ConsumerGroupListResultPage contains a page of ConsumerGroup values. 
-type ConsumerGroupListResultPage struct { - fn func(context.Context, ConsumerGroupListResult) (ConsumerGroupListResult, error) - cglr ConsumerGroupListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ConsumerGroupListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ConsumerGroupListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.cglr) - if err != nil { - return err - } - page.cglr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ConsumerGroupListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ConsumerGroupListResultPage) NotDone() bool { - return !page.cglr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ConsumerGroupListResultPage) Response() ConsumerGroupListResult { - return page.cglr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ConsumerGroupListResultPage) Values() []ConsumerGroup { - if page.cglr.IsEmpty() { - return nil - } - return *page.cglr.Value -} - -// Creates a new instance of the ConsumerGroupListResultPage type. -func NewConsumerGroupListResultPage(cur ConsumerGroupListResult, getNextPage func(context.Context, ConsumerGroupListResult) (ConsumerGroupListResult, error)) ConsumerGroupListResultPage { - return ConsumerGroupListResultPage{ - fn: getNextPage, - cglr: cur, - } -} - -// ConsumerGroupProperties single item in List or Get Consumer group operation -type ConsumerGroupProperties struct { - // CreatedAt - READ-ONLY; Exact time the message was created. - CreatedAt *date.Time `json:"createdAt,omitempty"` - // UpdatedAt - READ-ONLY; The exact time the message was updated. - UpdatedAt *date.Time `json:"updatedAt,omitempty"` - // UserMetadata - User Metadata is a placeholder to store user-defined string data with maximum length 1024. e.g. it can be used to store descriptive data, such as list of teams and their contact information also user-defined configuration settings can be stored. - UserMetadata *string `json:"userMetadata,omitempty"` -} - -// MarshalJSON is the custom marshaler for ConsumerGroupProperties. -func (cg ConsumerGroupProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cg.UserMetadata != nil { - objectMap["userMetadata"] = cg.UserMetadata - } - return json.Marshal(objectMap) -} - -// Destination capture storage details for capture description -type Destination struct { - // Name - Name for capture destination - Name *string `json:"name,omitempty"` - // DestinationProperties - Properties describing the storage account, blob container and archive name format for capture destination - *DestinationProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for Destination. 
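Note the loop inside every page-level NextWithContext in this file: it keeps following nextLink while a fetched page is empty, so callers only ever observe non-empty pages or the end of the listing. That makes page-level consumption a simple two-level loop, sketched here for the ConsumerGroup flavor defined above:

```go
import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
)

// drainConsumerGroups collects every value behind an already-initialized
// page. NotDone() reduces to "current page is non-empty" precisely because
// NextWithContext never surfaces an empty intermediate page.
func drainConsumerGroups(ctx context.Context, page eventhub.ConsumerGroupListResultPage) ([]eventhub.ConsumerGroup, error) {
	var all []eventhub.ConsumerGroup
	for page.NotDone() {
		all = append(all, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}
```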
-func (d Destination) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if d.Name != nil { - objectMap["name"] = d.Name - } - if d.DestinationProperties != nil { - objectMap["properties"] = d.DestinationProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Destination struct. -func (d *Destination) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - d.Name = &name - } - case "properties": - if v != nil { - var destinationProperties DestinationProperties - err = json.Unmarshal(*v, &destinationProperties) - if err != nil { - return err - } - d.DestinationProperties = &destinationProperties - } - } - } - - return nil -} - -// DestinationProperties properties describing the storage account, blob container and archive name format -// for capture destination -type DestinationProperties struct { - // StorageAccountResourceID - Resource id of the storage account to be used to create the blobs - StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"` - // BlobContainer - Blob container Name - BlobContainer *string `json:"blobContainer,omitempty"` - // ArchiveNameFormat - Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order - ArchiveNameFormat *string `json:"archiveNameFormat,omitempty"` -} - -// EHNamespace single Namespace item in List or Get Operation -type EHNamespace struct { - autorest.Response `json:"-"` - // Sku - Properties of sku resource - Sku *Sku `json:"sku,omitempty"` - // EHNamespaceProperties - Namespace properties supplied for create namespace operation. - *EHNamespaceProperties `json:"properties,omitempty"` - // Location - Resource location. - Location *string `json:"location,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for EHNamespace. -func (en EHNamespace) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if en.Sku != nil { - objectMap["sku"] = en.Sku - } - if en.EHNamespaceProperties != nil { - objectMap["properties"] = en.EHNamespaceProperties - } - if en.Location != nil { - objectMap["location"] = en.Location - } - if en.Tags != nil { - objectMap["tags"] = en.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for EHNamespace struct. 
-func (en *EHNamespace) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "sku": - if v != nil { - var sku Sku - err = json.Unmarshal(*v, &sku) - if err != nil { - return err - } - en.Sku = &sku - } - case "properties": - if v != nil { - var eHNamespaceProperties EHNamespaceProperties - err = json.Unmarshal(*v, &eHNamespaceProperties) - if err != nil { - return err - } - en.EHNamespaceProperties = &eHNamespaceProperties - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - en.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - en.Tags = tags - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - en.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - en.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - en.Type = &typeVar - } - } - } - - return nil -} - -// EHNamespaceListResult the response of the List Namespace operation -type EHNamespaceListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List Namespace operation - Value *[]EHNamespace `json:"value,omitempty"` - // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of namespaces. - NextLink *string `json:"nextLink,omitempty"` -} - -// EHNamespaceListResultIterator provides access to a complete listing of EHNamespace values. -type EHNamespaceListResultIterator struct { - i int - page EHNamespaceListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *EHNamespaceListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EHNamespaceListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *EHNamespaceListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter EHNamespaceListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter EHNamespaceListResultIterator) Response() EHNamespaceListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. 
-func (iter EHNamespaceListResultIterator) Value() EHNamespace { - if !iter.page.NotDone() { - return EHNamespace{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the EHNamespaceListResultIterator type. -func NewEHNamespaceListResultIterator(page EHNamespaceListResultPage) EHNamespaceListResultIterator { - return EHNamespaceListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (enlr EHNamespaceListResult) IsEmpty() bool { - return enlr.Value == nil || len(*enlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (enlr EHNamespaceListResult) hasNextLink() bool { - return enlr.NextLink != nil && len(*enlr.NextLink) != 0 -} - -// eHNamespaceListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (enlr EHNamespaceListResult) eHNamespaceListResultPreparer(ctx context.Context) (*http.Request, error) { - if !enlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(enlr.NextLink))) -} - -// EHNamespaceListResultPage contains a page of EHNamespace values. -type EHNamespaceListResultPage struct { - fn func(context.Context, EHNamespaceListResult) (EHNamespaceListResult, error) - enlr EHNamespaceListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *EHNamespaceListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EHNamespaceListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.enlr) - if err != nil { - return err - } - page.enlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *EHNamespaceListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page EHNamespaceListResultPage) NotDone() bool { - return !page.enlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page EHNamespaceListResultPage) Response() EHNamespaceListResult { - return page.enlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page EHNamespaceListResultPage) Values() []EHNamespace { - if page.enlr.IsEmpty() { - return nil - } - return *page.enlr.Value -} - -// Creates a new instance of the EHNamespaceListResultPage type. -func NewEHNamespaceListResultPage(cur EHNamespaceListResult, getNextPage func(context.Context, EHNamespaceListResult) (EHNamespaceListResult, error)) EHNamespaceListResultPage { - return EHNamespaceListResultPage{ - fn: getNextPage, - enlr: cur, - } -} - -// EHNamespaceProperties namespace properties supplied for create namespace operation. -type EHNamespaceProperties struct { - // ProvisioningState - READ-ONLY; Provisioning state of the Namespace. 
- ProvisioningState *string `json:"provisioningState,omitempty"` - // CreatedAt - READ-ONLY; The time the Namespace was created. - CreatedAt *date.Time `json:"createdAt,omitempty"` - // UpdatedAt - READ-ONLY; The time the Namespace was updated. - UpdatedAt *date.Time `json:"updatedAt,omitempty"` - // ServiceBusEndpoint - READ-ONLY; Endpoint you can use to perform Service Bus operations. - ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` - // MetricID - READ-ONLY; Identifier for Azure Insights metrics. - MetricID *string `json:"metricId,omitempty"` - // IsAutoInflateEnabled - Value that indicates whether AutoInflate is enabled for eventhub namespace. - IsAutoInflateEnabled *bool `json:"isAutoInflateEnabled,omitempty"` - // MaximumThroughputUnits - Upper limit of throughput units when AutoInflate is enabled, value should be within 0 to 20 throughput units. ( '0' if AutoInflateEnabled = true) - MaximumThroughputUnits *int32 `json:"maximumThroughputUnits,omitempty"` - // KafkaEnabled - Value that indicates whether Kafka is enabled for eventhub namespace. - KafkaEnabled *bool `json:"kafkaEnabled,omitempty"` -} - -// MarshalJSON is the custom marshaler for EHNamespaceProperties. -func (en EHNamespaceProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if en.IsAutoInflateEnabled != nil { - objectMap["isAutoInflateEnabled"] = en.IsAutoInflateEnabled - } - if en.MaximumThroughputUnits != nil { - objectMap["maximumThroughputUnits"] = en.MaximumThroughputUnits - } - if en.KafkaEnabled != nil { - objectMap["kafkaEnabled"] = en.KafkaEnabled - } - return json.Marshal(objectMap) -} - -// ErrorResponse error response indicates Event Hub service is not able to process the incoming request. -// The reason is provided in the error message. -type ErrorResponse struct { - // Code - Error code. - Code *string `json:"code,omitempty"` - // Message - Error message indicating why the operation failed. - Message *string `json:"message,omitempty"` -} - -// ListResult the result of the List EventHubs operation. -type ListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List EventHubs operation. - Value *[]Model `json:"value,omitempty"` - // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of EventHubs. - NextLink *string `json:"nextLink,omitempty"` -} - -// ListResultIterator provides access to a complete listing of Model values. -type ListResultIterator struct { - i int - page ListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *ListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter ListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter ListResultIterator) Response() ListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter ListResultIterator) Value() Model { - if !iter.page.NotDone() { - return Model{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the ListResultIterator type. -func NewListResultIterator(page ListResultPage) ListResultIterator { - return ListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (lr ListResult) IsEmpty() bool { - return lr.Value == nil || len(*lr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (lr ListResult) hasNextLink() bool { - return lr.NextLink != nil && len(*lr.NextLink) != 0 -} - -// listResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) { - if !lr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(lr.NextLink))) -} - -// ListResultPage contains a page of Model values. -type ListResultPage struct { - fn func(context.Context, ListResult) (ListResult, error) - lr ListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.lr) - if err != nil { - return err - } - page.lr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *ListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page ListResultPage) NotDone() bool { - return !page.lr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page ListResultPage) Response() ListResult { - return page.lr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page ListResultPage) Values() []Model { - if page.lr.IsEmpty() { - return nil - } - return *page.lr.Value -} - -// Creates a new instance of the ListResultPage type. 
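The exported NewListResultPage constructor that follows takes the current page plus a getNextPage callback, which makes it possible to fabricate a paged result stream without any HTTP traffic, for example in unit tests. A sketch under that assumption; the fake callback must mimic listByNamespaceNextResults and hand back an empty result once the next link is gone, otherwise the skip-empty-pages loop in NextWithContext would never terminate:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	second := eventhub.ListResult{Value: &[]eventhub.Model{{}}}
	first := eventhub.ListResult{
		Value:    &[]eventhub.Model{{}, {}},
		NextLink: to.StringPtr("fake://page2"),
	}
	page := eventhub.NewListResultPage(first, func(ctx context.Context, cur eventhub.ListResult) (eventhub.ListResult, error) {
		// Like listByNamespaceNextResults: no next link means an empty result.
		if cur.NextLink == nil {
			return eventhub.ListResult{}, nil
		}
		return second, nil
	})

	total := 0
	for page.NotDone() {
		total += len(page.Values())
		if err := page.NextWithContext(context.Background()); err != nil {
			break
		}
	}
	fmt.Println(total) // 3
}
```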
-func NewListResultPage(cur ListResult, getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage { - return ListResultPage{ - fn: getNextPage, - lr: cur, - } -} - -// MessagingPlan messaging Plan for the namespace -type MessagingPlan struct { - autorest.Response `json:"-"` - *MessagingPlanProperties `json:"properties,omitempty"` - // Location - Resource location. - Location *string `json:"location,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for MessagingPlan. -func (mp MessagingPlan) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mp.MessagingPlanProperties != nil { - objectMap["properties"] = mp.MessagingPlanProperties - } - if mp.Location != nil { - objectMap["location"] = mp.Location - } - if mp.Tags != nil { - objectMap["tags"] = mp.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for MessagingPlan struct. -func (mp *MessagingPlan) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var messagingPlanProperties MessagingPlanProperties - err = json.Unmarshal(*v, &messagingPlanProperties) - if err != nil { - return err - } - mp.MessagingPlanProperties = &messagingPlanProperties - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - mp.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - mp.Tags = tags - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mp.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mp.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mp.Type = &typeVar - } - } - } - - return nil -} - -// MessagingPlanProperties ... -type MessagingPlanProperties struct { - // Sku - READ-ONLY; Sku type - Sku *int32 `json:"sku,omitempty"` - // SelectedEventHubUnit - READ-ONLY; Selected event hub unit - SelectedEventHubUnit *int32 `json:"selectedEventHubUnit,omitempty"` - // UpdatedAt - READ-ONLY; The exact time the messaging plan was updated. - UpdatedAt *date.Time `json:"updatedAt,omitempty"` - // Revision - READ-ONLY; revision number - Revision *int64 `json:"revision,omitempty"` -} - -// MarshalJSON is the custom marshaler for MessagingPlanProperties. -func (mp MessagingPlanProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// MessagingRegions messaging Region -type MessagingRegions struct { - // Properties - Properties of Messaging Region - Properties *MessagingRegionsProperties `json:"properties,omitempty"` - // Location - Resource location. - Location *string `json:"location,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // ID - READ-ONLY; Resource ID. 
- ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for MessagingRegions. -func (mr MessagingRegions) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mr.Properties != nil { - objectMap["properties"] = mr.Properties - } - if mr.Location != nil { - objectMap["location"] = mr.Location - } - if mr.Tags != nil { - objectMap["tags"] = mr.Tags - } - return json.Marshal(objectMap) -} - -// MessagingRegionsListResult the response of the List MessagingRegions operation. -type MessagingRegionsListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List MessagingRegions type. - Value *[]MessagingRegions `json:"value,omitempty"` - // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of MessagingRegions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for MessagingRegionsListResult. -func (mrlr MessagingRegionsListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mrlr.Value != nil { - objectMap["value"] = mrlr.Value - } - return json.Marshal(objectMap) -} - -// MessagingRegionsListResultIterator provides access to a complete listing of MessagingRegions values. -type MessagingRegionsListResultIterator struct { - i int - page MessagingRegionsListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *MessagingRegionsListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MessagingRegionsListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *MessagingRegionsListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter MessagingRegionsListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter MessagingRegionsListResultIterator) Response() MessagingRegionsListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter MessagingRegionsListResultIterator) Value() MessagingRegions { - if !iter.page.NotDone() { - return MessagingRegions{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the MessagingRegionsListResultIterator type. 
-func NewMessagingRegionsListResultIterator(page MessagingRegionsListResultPage) MessagingRegionsListResultIterator { - return MessagingRegionsListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (mrlr MessagingRegionsListResult) IsEmpty() bool { - return mrlr.Value == nil || len(*mrlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (mrlr MessagingRegionsListResult) hasNextLink() bool { - return mrlr.NextLink != nil && len(*mrlr.NextLink) != 0 -} - -// messagingRegionsListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (mrlr MessagingRegionsListResult) messagingRegionsListResultPreparer(ctx context.Context) (*http.Request, error) { - if !mrlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(mrlr.NextLink))) -} - -// MessagingRegionsListResultPage contains a page of MessagingRegions values. -type MessagingRegionsListResultPage struct { - fn func(context.Context, MessagingRegionsListResult) (MessagingRegionsListResult, error) - mrlr MessagingRegionsListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *MessagingRegionsListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MessagingRegionsListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.mrlr) - if err != nil { - return err - } - page.mrlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *MessagingRegionsListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page MessagingRegionsListResultPage) NotDone() bool { - return !page.mrlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page MessagingRegionsListResultPage) Response() MessagingRegionsListResult { - return page.mrlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page MessagingRegionsListResultPage) Values() []MessagingRegions { - if page.mrlr.IsEmpty() { - return nil - } - return *page.mrlr.Value -} - -// Creates a new instance of the MessagingRegionsListResultPage type. 
-func NewMessagingRegionsListResultPage(cur MessagingRegionsListResult, getNextPage func(context.Context, MessagingRegionsListResult) (MessagingRegionsListResult, error)) MessagingRegionsListResultPage { - return MessagingRegionsListResultPage{ - fn: getNextPage, - mrlr: cur, - } -} - -// MessagingRegionsProperties properties of Messaging Region -type MessagingRegionsProperties struct { - // Code - READ-ONLY; Region code - Code *string `json:"code,omitempty"` - // FullName - READ-ONLY; Full name of the region - FullName *string `json:"fullName,omitempty"` -} - -// MarshalJSON is the custom marshaler for MessagingRegionsProperties. -func (mr MessagingRegionsProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Model single item in List or Get Event Hub operation -type Model struct { - autorest.Response `json:"-"` - // Properties - Properties supplied to the Create Or Update Event Hub operation. - *Properties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Model. -func (mVar Model) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mVar.Properties != nil { - objectMap["properties"] = mVar.Properties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for Model struct. -func (mVar *Model) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var properties Properties - err = json.Unmarshal(*v, &properties) - if err != nil { - return err - } - mVar.Properties = &properties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mVar.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mVar.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mVar.Type = &typeVar - } - } - } - - return nil -} - -// NamespacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a -// long-running operation. -type NamespacesCreateOrUpdateFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(NamespacesClient) (EHNamespace, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *NamespacesCreateOrUpdateFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for NamespacesCreateOrUpdateFuture.Result. 
-func (future *NamespacesCreateOrUpdateFuture) result(client NamespacesClient) (en EHNamespace, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - en.Response.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("eventhub.NamespacesCreateOrUpdateFuture") - return - } - sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) - if en.Response.Response, err = future.GetResult(sender); err == nil && en.Response.Response.StatusCode != http.StatusNoContent { - en, err = client.CreateOrUpdateResponder(en.Response.Response) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesCreateOrUpdateFuture", "Result", en.Response.Response, "Failure responding to request") - } - } - return -} - -// NamespacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running -// operation. -type NamespacesDeleteFuture struct { - azure.FutureAPI - // Result returns the result of the asynchronous operation. - // If the operation has not completed it will return an error. - Result func(NamespacesClient) (autorest.Response, error) -} - -// UnmarshalJSON is the custom unmarshaller for CreateFuture. -func (future *NamespacesDeleteFuture) UnmarshalJSON(body []byte) error { - var azFuture azure.Future - if err := json.Unmarshal(body, &azFuture); err != nil { - return err - } - future.FutureAPI = &azFuture - future.Result = future.result - return nil -} - -// result is the default implementation for NamespacesDeleteFuture.Result. -func (future *NamespacesDeleteFuture) result(client NamespacesClient) (ar autorest.Response, err error) { - var done bool - done, err = future.DoneWithContext(context.Background(), client) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesDeleteFuture", "Result", future.Response(), "Polling failure") - return - } - if !done { - ar.Response = future.Response() - err = azure.NewAsyncOpIncompleteError("eventhub.NamespacesDeleteFuture") - return - } - ar.Response = future.Response() - return -} - -// NetworkRuleSet description of NetworkRuleSet resource. -type NetworkRuleSet struct { - autorest.Response `json:"-"` - // NetworkRuleSetProperties - NetworkRuleSet properties - *NetworkRuleSetProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for NetworkRuleSet. -func (nrs NetworkRuleSet) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if nrs.NetworkRuleSetProperties != nil { - objectMap["properties"] = nrs.NetworkRuleSetProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for NetworkRuleSet struct. 
-func (nrs *NetworkRuleSet) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var networkRuleSetProperties NetworkRuleSetProperties - err = json.Unmarshal(*v, &networkRuleSetProperties) - if err != nil { - return err - } - nrs.NetworkRuleSetProperties = &networkRuleSetProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - nrs.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - nrs.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - nrs.Type = &typeVar - } - } - } - - return nil -} - -// NetworkRuleSetListResult the response of the List NetworkRuleSet operation -type NetworkRuleSetListResult struct { - autorest.Response `json:"-"` - // Value - Result of the List NetworkRuleSet operation. - Value *[]NetworkRuleSet `json:"value,omitempty"` - // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of NetworkRuleSet. - NextLink *string `json:"nextLink,omitempty"` -} - -// NetworkRuleSetListResultIterator provides access to a complete listing of NetworkRuleSet values. -type NetworkRuleSetListResultIterator struct { - i int - page NetworkRuleSetListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *NetworkRuleSetListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/NetworkRuleSetListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *NetworkRuleSetListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter NetworkRuleSetListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter NetworkRuleSetListResultIterator) Response() NetworkRuleSetListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter NetworkRuleSetListResultIterator) Value() NetworkRuleSet { - if !iter.page.NotDone() { - return NetworkRuleSet{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the NetworkRuleSetListResultIterator type. 
-func NewNetworkRuleSetListResultIterator(page NetworkRuleSetListResultPage) NetworkRuleSetListResultIterator {
- return NetworkRuleSetListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (nrslr NetworkRuleSetListResult) IsEmpty() bool {
- return nrslr.Value == nil || len(*nrslr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (nrslr NetworkRuleSetListResult) hasNextLink() bool {
- return nrslr.NextLink != nil && len(*nrslr.NextLink) != 0
-}
-
-// networkRuleSetListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (nrslr NetworkRuleSetListResult) networkRuleSetListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !nrslr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(nrslr.NextLink)))
-}
-
-// NetworkRuleSetListResultPage contains a page of NetworkRuleSet values.
-type NetworkRuleSetListResultPage struct {
- fn func(context.Context, NetworkRuleSetListResult) (NetworkRuleSetListResult, error)
- nrslr NetworkRuleSetListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *NetworkRuleSetListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/NetworkRuleSetListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.nrslr)
- if err != nil {
- return err
- }
- page.nrslr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *NetworkRuleSetListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page NetworkRuleSetListResultPage) NotDone() bool {
- return !page.nrslr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page NetworkRuleSetListResultPage) Response() NetworkRuleSetListResult {
- return page.nrslr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page NetworkRuleSetListResultPage) Values() []NetworkRuleSet {
- if page.nrslr.IsEmpty() {
- return nil
- }
- return *page.nrslr.Value
-}
-
-// Creates a new instance of the NetworkRuleSetListResultPage type.
-func NewNetworkRuleSetListResultPage(cur NetworkRuleSetListResult, getNextPage func(context.Context, NetworkRuleSetListResult) (NetworkRuleSetListResult, error)) NetworkRuleSetListResultPage {
- return NetworkRuleSetListResultPage{
- fn: getNextPage,
- nrslr: cur,
- }
-}
-
-// NetworkRuleSetProperties networkRuleSet properties
-type NetworkRuleSetProperties struct {
- // DefaultAction - Default Action for Network Rule Set. Possible values include: 'Allow', 'Deny'
- DefaultAction DefaultAction `json:"defaultAction,omitempty"`
- // VirtualNetworkRules - List VirtualNetwork Rules
- VirtualNetworkRules *[]NWRuleSetVirtualNetworkRules `json:"virtualNetworkRules,omitempty"`
- // IPRules - List of IpRules
- IPRules *[]NWRuleSetIPRules `json:"ipRules,omitempty"`
-}
-
-// NWRuleSetIPRules description of NetWorkRuleSet - IpRules resource.
-type NWRuleSetIPRules struct {
- // IPMask - IP Mask
- IPMask *string `json:"ipMask,omitempty"`
- // Action - The IP Filter Action. Possible values include: 'NetworkRuleIPActionAllow'
- Action NetworkRuleIPAction `json:"action,omitempty"`
-}
-
-// NWRuleSetVirtualNetworkRules description of VirtualNetworkRules - NetworkRules resource.
-type NWRuleSetVirtualNetworkRules struct {
- // Subnet - Subnet properties
- Subnet *Subnet `json:"subnet,omitempty"`
- // IgnoreMissingVnetServiceEndpoint - Value that indicates whether to ignore missing VNet Service Endpoint
- IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty"`
-}
-
-// Operation a Event Hub REST API operation
-type Operation struct {
- // Name - READ-ONLY; Operation name: {provider}/{resource}/{operation}
- Name *string `json:"name,omitempty"`
- // Display - The object that represents the operation.
- Display *OperationDisplay `json:"display,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for Operation.
-func (o Operation) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if o.Display != nil {
- objectMap["display"] = o.Display
- }
- return json.Marshal(objectMap)
-}
-
-// OperationDisplay the object that represents the operation.
-type OperationDisplay struct {
- // Provider - READ-ONLY; Service provider: Microsoft.EventHub
- Provider *string `json:"provider,omitempty"`
- // Resource - READ-ONLY; Resource on which the operation is performed: Invoice, etc.
- Resource *string `json:"resource,omitempty"`
- // Operation - READ-ONLY; Operation type: Read, write, delete, etc.
- Operation *string `json:"operation,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OperationDisplay.
-func (o OperationDisplay) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OperationListResult result of the request to list Event Hub operations. It contains a list of operations
-// and a URL link to get the next set of results.
-type OperationListResult struct {
- autorest.Response `json:"-"`
- // Value - READ-ONLY; List of Event Hub operations supported by the Microsoft.EventHub resource provider.
- Value *[]Operation `json:"value,omitempty"`
- // NextLink - READ-ONLY; URL to get the next set of operation list results if there are any.
- NextLink *string `json:"nextLink,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for OperationListResult.
-func (olr OperationListResult) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- return json.Marshal(objectMap)
-}
-
-// OperationListResultIterator provides access to a complete listing of Operation values.
-type OperationListResultIterator struct {
- i int
- page OperationListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
- defer func() {
- sc := -1
- if iter.Response().Response.Response != nil {
- sc = iter.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- iter.i++
- if iter.i < len(iter.page.Values()) {
- return nil
- }
- err = iter.page.NextWithContext(ctx)
- if err != nil {
- iter.i--
- return err
- }
- iter.i = 0
- return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *OperationListResultIterator) Next() error {
- return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter OperationListResultIterator) NotDone() bool {
- return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter OperationListResultIterator) Response() OperationListResult {
- return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter OperationListResultIterator) Value() Operation {
- if !iter.page.NotDone() {
- return Operation{}
- }
- return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the OperationListResultIterator type.
-func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
- return OperationListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (olr OperationListResult) IsEmpty() bool {
- return olr.Value == nil || len(*olr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (olr OperationListResult) hasNextLink() bool {
- return olr.NextLink != nil && len(*olr.NextLink) != 0
-}
-
-// operationListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
- if !olr.hasNextLink() {
- return nil, nil
- }
- return autorest.Prepare((&http.Request{}).WithContext(ctx),
- autorest.AsJSON(),
- autorest.AsGet(),
- autorest.WithBaseURL(to.String(olr.NextLink)))
-}
-
-// OperationListResultPage contains a page of Operation values.
-type OperationListResultPage struct {
- fn func(context.Context, OperationListResult) (OperationListResult, error)
- olr OperationListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
- defer func() {
- sc := -1
- if page.Response().Response.Response != nil {
- sc = page.Response().Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- for {
- next, err := page.fn(ctx, page.olr)
- if err != nil {
- return err
- }
- page.olr = next
- if !next.hasNextLink() || !next.IsEmpty() {
- break
- }
- }
- return nil
-}
-
-// Next advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (page *OperationListResultPage) Next() error {
- return page.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the page enumeration should be started or is not yet complete.
-func (page OperationListResultPage) NotDone() bool {
- return !page.olr.IsEmpty()
-}
-
-// Response returns the raw server response from the last page request.
-func (page OperationListResultPage) Response() OperationListResult {
- return page.olr
-}
-
-// Values returns the slice of values for the current page or nil if there are no values.
-func (page OperationListResultPage) Values() []Operation {
- if page.olr.IsEmpty() {
- return nil
- }
- return *page.olr.Value
-}
-
-// Creates a new instance of the OperationListResultPage type.
-func NewOperationListResultPage(cur OperationListResult, getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
- return OperationListResultPage{
- fn: getNextPage,
- olr: cur,
- }
-}
-
-// Properties properties supplied to the Create Or Update Event Hub operation.
-type Properties struct {
- // PartitionIds - READ-ONLY; Current number of shards on the Event Hub.
- PartitionIds *[]string `json:"partitionIds,omitempty"`
- // CreatedAt - READ-ONLY; Exact time the Event Hub was created.
- CreatedAt *date.Time `json:"createdAt,omitempty"`
- // UpdatedAt - READ-ONLY; The exact time the message was updated.
- UpdatedAt *date.Time `json:"updatedAt,omitempty"`
- // MessageRetentionInDays - Number of days to retain the events for this Event Hub, value should be 1 to 7 days
- MessageRetentionInDays *int64 `json:"messageRetentionInDays,omitempty"`
- // PartitionCount - Number of partitions created for the Event Hub, allowed values are from 1 to 32 partitions.
- PartitionCount *int64 `json:"partitionCount,omitempty"`
- // Status - Enumerates the possible values for the status of the Event Hub. Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled', 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'
- Status EntityStatus `json:"status,omitempty"`
- // CaptureDescription - Properties of capture description
- CaptureDescription *CaptureDescription `json:"captureDescription,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for Properties.
-func (p Properties) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if p.MessageRetentionInDays != nil {
- objectMap["messageRetentionInDays"] = p.MessageRetentionInDays
- }
- if p.PartitionCount != nil {
- objectMap["partitionCount"] = p.PartitionCount
- }
- if p.Status != "" {
- objectMap["status"] = p.Status
- }
- if p.CaptureDescription != nil {
- objectMap["captureDescription"] = p.CaptureDescription
- }
- return json.Marshal(objectMap)
-}
-
-// RegenerateAccessKeyParameters parameters supplied to the Regenerate Authorization Rule operation,
-// specifies which key needs to be reset.
-type RegenerateAccessKeyParameters struct {
- // KeyType - The access key to regenerate. Possible values include: 'PrimaryKey', 'SecondaryKey'
- KeyType KeyType `json:"keyType,omitempty"`
- // Key - Optional, if the key value provided, is set for KeyType or autogenerated Key value set for keyType
- Key *string `json:"key,omitempty"`
-}
-
-// Resource the resource definition.
-type Resource struct {
- // ID - READ-ONLY; Resource ID.
- ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Sku SKU parameters supplied to the create namespace operation -type Sku struct { - // Name - Name of this SKU. Possible values include: 'Basic', 'Standard' - Name SkuName `json:"name,omitempty"` - // Tier - The billing tier of this particular SKU. Possible values include: 'SkuTierBasic', 'SkuTierStandard' - Tier SkuTier `json:"tier,omitempty"` - // Capacity - The Event Hubs throughput units, value should be 0 to 20 throughput units. - Capacity *int32 `json:"capacity,omitempty"` -} - -// Subnet properties supplied for Subnet -type Subnet struct { - // ID - Resource ID of Virtual Network Subnet - ID *string `json:"id,omitempty"` -} - -// TrackedResource definition of resource. -type TrackedResource struct { - // Location - Resource location. - Location *string `json:"location,omitempty"` - // Tags - Resource tags. - Tags map[string]*string `json:"tags"` - // ID - READ-ONLY; Resource ID. - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Resource name. - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Resource type. - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for TrackedResource. -func (tr TrackedResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if tr.Location != nil { - objectMap["location"] = tr.Location - } - if tr.Tags != nil { - objectMap["tags"] = tr.Tags - } - return json.Marshal(objectMap) -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/namespaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/namespaces.go deleted file mode 100644 index 2bdf3e98e8d..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/namespaces.go +++ /dev/null @@ -1,1694 +0,0 @@ -package eventhub - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// NamespacesClient is the azure Event Hubs client -type NamespacesClient struct { - BaseClient -} - -// NewNamespacesClient creates an instance of the NamespacesClient client. -func NewNamespacesClient(subscriptionID string) NamespacesClient { - return NewNamespacesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewNamespacesClientWithBaseURI creates an instance of the NamespacesClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewNamespacesClientWithBaseURI(baseURI string, subscriptionID string) NamespacesClient { - return NamespacesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CheckNameAvailability check the give Namespace name availability. 
-// Parameters: -// parameters - parameters to check availability of the given Namespace name -func (client NamespacesClient) CheckNameAvailability(ctx context.Context, parameters CheckNameAvailabilityParameter) (result CheckNameAvailabilityResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CheckNameAvailability") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.NamespacesClient", "CheckNameAvailability", err.Error()) - } - - req, err := client.CheckNameAvailabilityPreparer(ctx, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CheckNameAvailability", nil, "Failure preparing request") - return - } - - resp, err := client.CheckNameAvailabilitySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CheckNameAvailability", resp, "Failure sending request") - return - } - - result, err = client.CheckNameAvailabilityResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CheckNameAvailability", resp, "Failure responding to request") - return - } - - return -} - -// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. -func (client NamespacesClient) CheckNameAvailabilityPreparer(ctx context.Context, parameters CheckNameAvailabilityParameter) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/checkNameAvailability", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the -// http.Response Body if it receives an error. -func (client NamespacesClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always -// closes the http.Response Body. -func (client NamespacesClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdate creates or updates a namespace. Once created, this namespace's resource manifest is immutable. This -// operation is idempotent. 
-// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// parameters - parameters for creating a namespace resource. -func (client NamespacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace) (result NamespacesCreateOrUpdateFuture, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.FutureAPI != nil && result.FutureAPI.Response() != nil { - sc = result.FutureAPI.Response().StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Sku.Capacity", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.Sku.Capacity", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil}, - {Target: "parameters.Sku.Capacity", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}, - }}, - {Target: "parameters.EHNamespaceProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.EHNamespaceProperties.MaximumThroughputUnits", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.EHNamespaceProperties.MaximumThroughputUnits", Name: validation.InclusiveMaximum, Rule: int64(20), Chain: nil}, - {Target: "parameters.EHNamespaceProperties.MaximumThroughputUnits", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}, - }}, - }}}}}); err != nil { - return result, validation.NewError("eventhub.NamespacesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, namespaceName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - result, err = client.CreateOrUpdateSender(req) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdate", result.Response(), "Failure sending request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client NamespacesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client NamespacesClient) CreateOrUpdateSender(req *http.Request) (future NamespacesCreateOrUpdateFuture, err error) { - var resp *http.Response - future.FutureAPI = &azure.Future{} - resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) - if err != nil { - return - } - var azf azure.Future - azf, err = azure.NewFutureFromResponse(resp) - future.FutureAPI = &azf - future.Result = future.result - return -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (result EHNamespace, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateAuthorizationRule creates or updates an AuthorizationRule for a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// authorizationRuleName - the authorization rule name. -// parameters - the shared access AuthorizationRule. 
-func (client NamespacesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters AuthorizationRule) (result AuthorizationRule, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdateAuthorizationRule") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: resourceGroupName, - Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, - {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: namespaceName, - Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}, - {TargetValue: authorizationRuleName, - Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AuthorizationRuleProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.AuthorizationRuleProperties.Rights", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { - return result, validation.NewError("eventhub.NamespacesClient", "CreateOrUpdateAuthorizationRule", err.Error()) - } - - req, err := client.CreateOrUpdateAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, authorizationRuleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
-func (client NamespacesClient) CreateOrUpdateAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters AuthorizationRule) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "authorizationRuleName": autorest.Encode("path", authorizationRuleName), - "namespaceName": autorest.Encode("path", namespaceName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the -// http.Response Body if it receives an error. -func (client NamespacesClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always -// closes the http.Response Body. -func (client NamespacesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result AuthorizationRule, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateOrUpdateNetworkRuleSet create or update NetworkRuleSet for a Namespace. -// Parameters: -// resourceGroupName - name of the resource group within the azure subscription. -// namespaceName - the Namespace name -// parameters - the Namespace IpFilterRule. 
-func (client NamespacesClient) CreateOrUpdateNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet) (result NetworkRuleSet, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdateNetworkRuleSet")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: resourceGroupName,
- Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
- {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
- {TargetValue: namespaceName,
- Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
- {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
- return result, validation.NewError("eventhub.NamespacesClient", "CreateOrUpdateNetworkRuleSet", err.Error())
- }
-
- req, err := client.CreateOrUpdateNetworkRuleSetPreparer(ctx, resourceGroupName, namespaceName, parameters)
- if err != nil {
- err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateNetworkRuleSet", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.CreateOrUpdateNetworkRuleSetSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateNetworkRuleSet", resp, "Failure sending request")
- return
- }
-
- result, err = client.CreateOrUpdateNetworkRuleSetResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "CreateOrUpdateNetworkRuleSet", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// CreateOrUpdateNetworkRuleSetPreparer prepares the CreateOrUpdateNetworkRuleSet request.
-func (client NamespacesClient) CreateOrUpdateNetworkRuleSetPreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "namespaceName": autorest.Encode("path", namespaceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2017-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsContentType("application/json; charset=utf-8"),
- autorest.AsPut(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets/default", pathParameters),
- autorest.WithJSON(parameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// CreateOrUpdateNetworkRuleSetSender sends the CreateOrUpdateNetworkRuleSet request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) CreateOrUpdateNetworkRuleSetSender(req *http.Request) (*http.Response, error) {
- return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// CreateOrUpdateNetworkRuleSetResponder handles the response to the CreateOrUpdateNetworkRuleSet request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) CreateOrUpdateNetworkRuleSetResponder(resp *http.Response) (result NetworkRuleSet, err error) {
- err = autorest.Respond(
- resp,
- azure.WithErrorUnlessStatusCode(http.StatusOK),
- autorest.ByUnmarshallingJSON(&result),
- autorest.ByClosing())
- result.Response = autorest.Response{Response: resp}
- return
-}
-
-// Delete deletes an existing namespace. This operation also removes all associated resources under the namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string) (result NamespacesDeleteFuture, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Delete")
- defer func() {
- sc := -1
- if result.FutureAPI != nil && result.FutureAPI.Response() != nil {
- sc = result.FutureAPI.Response().StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- if err := validation.Validate([]validation.Validation{
- {TargetValue: resourceGroupName,
- Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
- {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
- {TargetValue: namespaceName,
- Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
- {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
- return result, validation.NewError("eventhub.NamespacesClient", "Delete", err.Error())
- }
-
- req, err := client.DeletePreparer(ctx, resourceGroupName, namespaceName)
- if err != nil {
- err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Delete", nil, "Failure preparing request")
- return
- }
-
- result, err = client.DeleteSender(req)
- if err != nil {
- err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Delete", result.Response(), "Failure sending request")
- return
- }
-
- return
-}
-
-// DeletePreparer prepares the Delete request.
-func (client NamespacesClient) DeletePreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "namespaceName": autorest.Encode("path", namespaceName),
- "resourceGroupName": autorest.Encode("path", resourceGroupName),
- "subscriptionId": autorest.Encode("path", client.SubscriptionID),
- }
-
- const APIVersion = "2017-04-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsDelete(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteSender sends the Delete request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) DeleteSender(req *http.Request) (future NamespacesDeleteFuture, err error) {
-	var resp *http.Response
-	future.FutureAPI = &azure.Future{}
-	resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
-	if err != nil {
-		return
-	}
-	var azf azure.Future
-	azf, err = azure.NewFutureFromResponse(resp)
-	future.FutureAPI = &azf
-	future.Result = future.result
-	return
-}
-
-// DeleteResponder handles the response to the Delete request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
-		autorest.ByClosing())
-	result.Response = resp
-	return
-}
-
-// DeleteAuthorizationRule deletes an AuthorizationRule for a Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// authorizationRuleName - the authorization rule name.
-func (client NamespacesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.DeleteAuthorizationRule")
-		defer func() {
-			sc := -1
-			if result.Response != nil {
-				sc = result.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: authorizationRuleName,
-			Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "DeleteAuthorizationRule", err.Error())
-	}
-
-	req, err := client.DeleteAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, authorizationRuleName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "DeleteAuthorizationRule", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.DeleteAuthorizationRuleSender(req)
-	if err != nil {
-		result.Response = resp
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.DeleteAuthorizationRuleResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
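
Delete and DeleteSender above show the long-running-operation shape of this generated client: the caller gets back a NamespacesDeleteFuture seeded from the initial response, not a final result. A minimal usage sketch, assuming an already-authorized NamespacesClient; the helper name and resource names are placeholders, not part of the SDK:

	// deleteAndWait blocks until the namespace deletion reaches a terminal state.
	func deleteAndWait(ctx context.Context, client eventhub.NamespacesClient, resourceGroup, namespace string) error {
		future, err := client.Delete(ctx, resourceGroup, namespace)
		if err != nil {
			return err
		}
		// WaitForCompletionRef polls the operation wrapped by the future,
		// reusing the client's embedded autorest.Client for the requests.
		return future.WaitForCompletionRef(ctx, client.Client)
	}
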
-
-// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request.
-func (client NamespacesClient) DeleteAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
-		"namespaceName":         autorest.Encode("path", namespaceName),
-		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
-		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsDelete(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
-		autorest.ByClosing())
-	result.Response = resp
-	return
-}
-
-// Get gets the description of the specified namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string) (result EHNamespace, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Get")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "Get", err.Error())
-	}
-
-	req, err := client.GetPreparer(ctx, resourceGroupName, namespaceName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Get", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Get", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Get", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetPreparer prepares the Get request.
-func (client NamespacesClient) GetPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSender sends the Get request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) GetSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetResponder handles the response to the Get request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) GetResponder(resp *http.Response) (result EHNamespace, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetAuthorizationRule gets an AuthorizationRule for a Namespace by rule name.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// authorizationRuleName - the authorization rule name.
-func (client NamespacesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result AuthorizationRule, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetAuthorizationRule")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: authorizationRuleName,
-			Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "GetAuthorizationRule", err.Error())
-	}
-
-	req, err := client.GetAuthorizationRulePreparer(ctx, resourceGroupName, namespaceName, authorizationRuleName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetAuthorizationRule", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetAuthorizationRuleSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetAuthorizationRule", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetAuthorizationRuleResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetAuthorizationRule", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request.
-func (client NamespacesClient) GetAuthorizationRulePreparer(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
-		"namespaceName":         autorest.Encode("path", namespaceName),
-		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
-		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) GetAuthorizationRuleResponder(resp *http.Response) (result AuthorizationRule, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetMessagingPlan gets messaging plan for specified namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) GetMessagingPlan(ctx context.Context, resourceGroupName string, namespaceName string) (result MessagingPlan, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetMessagingPlan")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "GetMessagingPlan", err.Error())
-	}
-
-	req, err := client.GetMessagingPlanPreparer(ctx, resourceGroupName, namespaceName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetMessagingPlan", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetMessagingPlanSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetMessagingPlan", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetMessagingPlanResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetMessagingPlan", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetMessagingPlanPreparer prepares the GetMessagingPlan request.
-func (client NamespacesClient) GetMessagingPlanPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/messagingplan", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetMessagingPlanSender sends the GetMessagingPlan request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) GetMessagingPlanSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetMessagingPlanResponder handles the response to the GetMessagingPlan request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) GetMessagingPlanResponder(resp *http.Response) (result MessagingPlan, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetNetworkRuleSet gets NetworkRuleSet for a Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) GetNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string) (result NetworkRuleSet, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetNetworkRuleSet")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "GetNetworkRuleSet", err.Error())
-	}
-
-	req, err := client.GetNetworkRuleSetPreparer(ctx, resourceGroupName, namespaceName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetNetworkRuleSet", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetNetworkRuleSetSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetNetworkRuleSet", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetNetworkRuleSetResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "GetNetworkRuleSet", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetNetworkRuleSetPreparer prepares the GetNetworkRuleSet request.
-func (client NamespacesClient) GetNetworkRuleSetPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets/default", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetNetworkRuleSetSender sends the GetNetworkRuleSet request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) GetNetworkRuleSetSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// GetNetworkRuleSetResponder handles the response to the GetNetworkRuleSet request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) GetNetworkRuleSetResponder(resp *http.Response) (result NetworkRuleSet, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
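
GetNetworkRuleSet pairs with the CreateOrUpdateNetworkRuleSet method earlier in this file as a read-modify-write cycle on the namespace's single default rule set (both PUT and GET target .../networkRuleSets/default). A sketch under that assumption, with an authorized client and placeholder names, and no actual mutation shown:

	// updateNetworkRuleSet fetches the default rule set and writes it back.
	func updateNetworkRuleSet(ctx context.Context, client eventhub.NamespacesClient, resourceGroup, namespace string) error {
		ruleSet, err := client.GetNetworkRuleSet(ctx, resourceGroup, namespace)
		if err != nil {
			return err
		}
		// ...adjust ruleSet's properties here before writing it back...
		_, err = client.CreateOrUpdateNetworkRuleSet(ctx, resourceGroup, namespace, ruleSet)
		return err
	}
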
-
-// List lists all the available Namespaces within a subscription, irrespective of the resource groups.
-func (client NamespacesClient) List(ctx context.Context) (result EHNamespaceListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.List")
-		defer func() {
-			sc := -1
-			if result.enlr.Response.Response != nil {
-				sc = result.enlr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.fn = client.listNextResults
-	req, err := client.ListPreparer(ctx)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "List", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListSender(req)
-	if err != nil {
-		result.enlr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "List", resp, "Failure sending request")
-		return
-	}
-
-	result.enlr, err = client.ListResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "List", resp, "Failure responding to request")
-		return
-	}
-	if result.enlr.hasNextLink() && result.enlr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// ListPreparer prepares the List request.
-func (client NamespacesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/namespaces", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) ListResponder(resp *http.Response) (result EHNamespaceListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
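
List returns only the first page of results (EHNamespaceListResultPage); callers advance it with NextWithContext, or lean on the ListComplete iterator defined just below. A sketch of the page-walking pattern, assuming an authorized client; NotDone and Values are the standard page accessors in this generation of the SDK, assumed here rather than shown in this hunk:

	// printAllNamespaces walks every page of the subscription's namespaces.
	func printAllNamespaces(ctx context.Context, client eventhub.NamespacesClient) error {
		page, err := client.List(ctx)
		if err != nil {
			return err
		}
		for page.NotDone() {
			for _, ns := range page.Values() {
				fmt.Println(*ns.Name) // model fields are pointers in this SDK
			}
			if err := page.NextWithContext(ctx); err != nil {
				return err
			}
		}
		return nil
	}
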
-
-// listNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listNextResults(ctx context.Context, lastResults EHNamespaceListResult) (result EHNamespaceListResult, err error) {
-	req, err := lastResults.eHNamespaceListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.ListSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.ListResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client NamespacesClient) ListComplete(ctx context.Context) (result EHNamespaceListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.List")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.List(ctx)
-	return
-}
-
-// ListAuthorizationRules gets a list of authorization rules for a Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string) (result AuthorizationRuleListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListAuthorizationRules")
-		defer func() {
-			sc := -1
-			if result.arlr.Response.Response != nil {
-				sc = result.arlr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "ListAuthorizationRules", err.Error())
-	}
-
-	result.fn = client.listAuthorizationRulesNextResults
-	req, err := client.ListAuthorizationRulesPreparer(ctx, resourceGroupName, namespaceName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListAuthorizationRulesSender(req)
-	if err != nil {
-		result.arlr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending request")
-		return
-	}
-
-	result.arlr, err = client.ListAuthorizationRulesResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to request")
-		return
-	}
-	if result.arlr.hasNextLink() && result.arlr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request.
-func (client NamespacesClient) ListAuthorizationRulesPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) ListAuthorizationRulesResponder(resp *http.Response) (result AuthorizationRuleListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// listAuthorizationRulesNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults AuthorizationRuleListResult) (result AuthorizationRuleListResult, err error) {
-	req, err := lastResults.authorizationRuleListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.ListAuthorizationRulesSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listAuthorizationRulesNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.ListAuthorizationRulesResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listAuthorizationRulesNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required.
-func (client NamespacesClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result AuthorizationRuleListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListAuthorizationRules")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName)
-	return
-}
-
-// ListByResourceGroup lists the available Namespaces within a resource group.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-func (client NamespacesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result EHNamespaceListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListByResourceGroup")
-		defer func() {
-			sc := -1
-			if result.enlr.Response.Response != nil {
-				sc = result.enlr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "ListByResourceGroup", err.Error())
-	}
-
-	result.fn = client.listByResourceGroupNextResults
-	req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListByResourceGroupSender(req)
-	if err != nil {
-		result.enlr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListByResourceGroup", resp, "Failure sending request")
-		return
-	}
-
-	result.enlr, err = client.ListByResourceGroupResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListByResourceGroup", resp, "Failure responding to request")
-		return
-	}
-	if result.enlr.hasNextLink() && result.enlr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
-func (client NamespacesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) ListByResourceGroupResponder(resp *http.Response) (result EHNamespaceListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listByResourceGroupNextResults(ctx context.Context, lastResults EHNamespaceListResult) (result EHNamespaceListResult, err error) {
-	req, err := lastResults.eHNamespaceListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.ListByResourceGroupSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.ListByResourceGroupResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
-func (client NamespacesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result EHNamespaceListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListByResourceGroup")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
-	return
-}
-
-// ListKeys gets the primary and secondary connection strings for the Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// authorizationRuleName - the authorization rule name.
-func (client NamespacesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result AccessKeys, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListKeys")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: authorizationRuleName,
-			Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "ListKeys", err.Error())
-	}
-
-	req, err := client.ListKeysPreparer(ctx, resourceGroupName, namespaceName, authorizationRuleName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListKeys", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListKeysSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListKeys", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.ListKeysResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListKeys", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// ListKeysPreparer prepares the ListKeys request.
-func (client NamespacesClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
-		"namespaceName":         autorest.Encode("path", namespaceName),
-		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
-		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsPost(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}/listKeys", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListKeysSender sends the ListKeys request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) ListKeysSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListKeysResponder handles the response to the ListKeys request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) ListKeysResponder(resp *http.Response) (result AccessKeys, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
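
ListKeys is the usual way to recover a namespace-level connection string from this client. A sketch, assuming an authorized client; "RootManageSharedAccessKey" (the rule Azure provisions with every namespace) and the PrimaryConnectionString field of AccessKeys are assumptions here, not visible in this hunk:

	// namespaceConnectionString returns the namespace's primary connection string.
	func namespaceConnectionString(ctx context.Context, client eventhub.NamespacesClient, resourceGroup, namespace string) (string, error) {
		keys, err := client.ListKeys(ctx, resourceGroup, namespace, "RootManageSharedAccessKey")
		if err != nil {
			return "", err
		}
		return *keys.PrimaryConnectionString, nil
	}
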
-
-// ListNetworkRuleSets gets list of NetworkRuleSet for a Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-func (client NamespacesClient) ListNetworkRuleSets(ctx context.Context, resourceGroupName string, namespaceName string) (result NetworkRuleSetListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListNetworkRuleSets")
-		defer func() {
-			sc := -1
-			if result.nrslr.Response.Response != nil {
-				sc = result.nrslr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "ListNetworkRuleSets", err.Error())
-	}
-
-	result.fn = client.listNetworkRuleSetsNextResults
-	req, err := client.ListNetworkRuleSetsPreparer(ctx, resourceGroupName, namespaceName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListNetworkRuleSets", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListNetworkRuleSetsSender(req)
-	if err != nil {
-		result.nrslr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListNetworkRuleSets", resp, "Failure sending request")
-		return
-	}
-
-	result.nrslr, err = client.ListNetworkRuleSetsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "ListNetworkRuleSets", resp, "Failure responding to request")
-		return
-	}
-	if result.nrslr.hasNextLink() && result.nrslr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// ListNetworkRuleSetsPreparer prepares the ListNetworkRuleSets request.
-func (client NamespacesClient) ListNetworkRuleSetsPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/networkRuleSets", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListNetworkRuleSetsSender sends the ListNetworkRuleSets request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) ListNetworkRuleSetsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// ListNetworkRuleSetsResponder handles the response to the ListNetworkRuleSets request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) ListNetworkRuleSetsResponder(resp *http.Response) (result NetworkRuleSetListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// listNetworkRuleSetsNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listNetworkRuleSetsNextResults(ctx context.Context, lastResults NetworkRuleSetListResult) (result NetworkRuleSetListResult, err error) {
-	req, err := lastResults.networkRuleSetListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNetworkRuleSetsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.ListNetworkRuleSetsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNetworkRuleSetsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.ListNetworkRuleSetsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "listNetworkRuleSetsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// ListNetworkRuleSetsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client NamespacesClient) ListNetworkRuleSetsComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result NetworkRuleSetListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListNetworkRuleSets")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.ListNetworkRuleSets(ctx, resourceGroupName, namespaceName)
-	return
-}
-
-// RegenerateKeys regenerates the primary or secondary connection strings for the specified Namespace.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// authorizationRuleName - the authorization rule name.
-// parameters - parameters required to regenerate the connection string.
-func (client NamespacesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.RegenerateKeys")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}},
-		{TargetValue: authorizationRuleName,
-			Constraints: []validation.Constraint{{Target: "authorizationRuleName", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "RegenerateKeys", err.Error())
-	}
-
-	req, err := client.RegenerateKeysPreparer(ctx, resourceGroupName, namespaceName, authorizationRuleName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "RegenerateKeys", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RegenerateKeysSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "RegenerateKeys", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RegenerateKeysResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "RegenerateKeys", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RegenerateKeysPreparer prepares the RegenerateKeys request.
-func (client NamespacesClient) RegenerateKeysPreparer(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
-		"namespaceName":         autorest.Encode("path", namespaceName),
-		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
-		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}/authorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RegenerateKeysSender sends the RegenerateKeys request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) RegenerateKeysResponder(resp *http.Response) (result AccessKeys, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// Update creates or updates a namespace. Once created, this namespace's resource manifest is immutable. This operation
-// is idempotent.
-// Parameters:
-// resourceGroupName - name of the resource group within the azure subscription.
-// namespaceName - the Namespace name
-// parameters - parameters for updating a namespace resource.
-func (client NamespacesClient) Update(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace) (result EHNamespace, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Update")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: resourceGroupName,
-			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
-				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
-		{TargetValue: namespaceName,
-			Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
-				{Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("eventhub.NamespacesClient", "Update", err.Error())
-	}
-
-	req, err := client.UpdatePreparer(ctx, resourceGroupName, namespaceName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Update", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.UpdateSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Update", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.UpdateResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.NamespacesClient", "Update", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// UpdatePreparer prepares the Update request.
-func (client NamespacesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters EHNamespace) (*http.Request, error) {
-	pathParameters := map[string]interface{}{
-		"namespaceName":     autorest.Encode("path", namespaceName),
-		"resourceGroupName": autorest.Encode("path", resourceGroupName),
-		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
-	}
-
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPatch(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/namespaces/{namespaceName}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateSender sends the Update request. The method will close the
-// http.Response Body if it receives an error.
-func (client NamespacesClient) UpdateSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, azure.DoRetryWithRegistration(client.Client))
-}
-
-// UpdateResponder handles the response to the Update request. The method always
-// closes the http.Response Body.
-func (client NamespacesClient) UpdateResponder(resp *http.Response) (result EHNamespace, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/operations.go
deleted file mode 100644
index 9af42a5896c..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/operations.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package eventhub
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-import (
-	"context"
-	"github.com/Azure/go-autorest/autorest"
-	"github.com/Azure/go-autorest/autorest/azure"
-	"github.com/Azure/go-autorest/tracing"
-	"net/http"
-)
-
-// OperationsClient is the azure Event Hubs client
-type OperationsClient struct {
-	BaseClient
-}
-
-// NewOperationsClient creates an instance of the OperationsClient client.
-func NewOperationsClient(subscriptionID string) OperationsClient {
-	return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
-}
-
-// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this
-// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
-func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
-	return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List lists all of the available Event Hub REST API operations.
-func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
-		defer func() {
-			sc := -1
-			if result.olr.Response.Response != nil {
-				sc = result.olr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.fn = client.listNextResults
-	req, err := client.ListPreparer(ctx)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.OperationsClient", "List", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListSender(req)
-	if err != nil {
-		result.olr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "eventhub.OperationsClient", "List", resp, "Failure sending request")
-		return
-	}
-
-	result.olr, err = client.ListResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.OperationsClient", "List", resp, "Failure responding to request")
-		return
-	}
-	if result.olr.hasNextLink() && result.olr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// ListPreparer prepares the List request.
-func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
-	const APIVersion = "2017-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPath("/providers/Microsoft.EventHub/operations"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// listNextResults retrieves the next set of results, if any.
-func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
-	req, err := lastResults.operationListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "eventhub.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.ListSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "eventhub.OperationsClient", "listNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.ListResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "eventhub.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// ListComplete enumerates all values, automatically crossing page boundaries as required.
-func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.List(ctx)
-	return
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/regions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/regions.go
deleted file mode 100644
index 1e99dd75516..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/regions.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package eventhub
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// RegionsClient is the azure Event Hubs client -type RegionsClient struct { - BaseClient -} - -// NewRegionsClient creates an instance of the RegionsClient client. -func NewRegionsClient(subscriptionID string) RegionsClient { - return NewRegionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewRegionsClientWithBaseURI creates an instance of the RegionsClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewRegionsClientWithBaseURI(baseURI string, subscriptionID string) RegionsClient { - return RegionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// ListBySku gets the available Regions for a given sku -// Parameters: -// sku - the sku type. -func (client RegionsClient) ListBySku(ctx context.Context, sku string) (result MessagingRegionsListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RegionsClient.ListBySku") - defer func() { - sc := -1 - if result.mrlr.Response.Response != nil { - sc = result.mrlr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: sku, - Constraints: []validation.Constraint{{Target: "sku", Name: validation.MaxLength, Rule: 50, Chain: nil}, - {Target: "sku", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { - return result, validation.NewError("eventhub.RegionsClient", "ListBySku", err.Error()) - } - - result.fn = client.listBySkuNextResults - req, err := client.ListBySkuPreparer(ctx, sku) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.RegionsClient", "ListBySku", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySkuSender(req) - if err != nil { - result.mrlr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "eventhub.RegionsClient", "ListBySku", resp, "Failure sending request") - return - } - - result.mrlr, err = client.ListBySkuResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.RegionsClient", "ListBySku", resp, "Failure responding to request") - return - } - if result.mrlr.hasNextLink() && result.mrlr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListBySkuPreparer prepares the ListBySku request. -func (client RegionsClient) ListBySkuPreparer(ctx context.Context, sku string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "sku": autorest.Encode("path", sku), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.EventHub/sku/{sku}/regions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySkuSender sends the ListBySku request. The method will close the -// http.Response Body if it receives an error. 
-func (client RegionsClient) ListBySkuSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySkuResponder handles the response to the ListBySku request. The method always -// closes the http.Response Body. -func (client RegionsClient) ListBySkuResponder(resp *http.Response) (result MessagingRegionsListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listBySkuNextResults retrieves the next set of results, if any. -func (client RegionsClient) listBySkuNextResults(ctx context.Context, lastResults MessagingRegionsListResult) (result MessagingRegionsListResult, err error) { - req, err := lastResults.messagingRegionsListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "eventhub.RegionsClient", "listBySkuNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListBySkuSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "eventhub.RegionsClient", "listBySkuNextResults", resp, "Failure sending next results request") - } - result, err = client.ListBySkuResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "eventhub.RegionsClient", "listBySkuNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListBySkuComplete enumerates all values, automatically crossing page boundaries as required. -func (client RegionsClient) ListBySkuComplete(ctx context.Context, sku string) (result MessagingRegionsListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/RegionsClient.ListBySku") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListBySku(ctx, sku) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/version.go deleted file mode 100644 index d758998817b..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package eventhub - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " eventhub/2017-04-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. 
-func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md deleted file mode 100644 index 6c701c1c40a..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/CHANGELOG.md +++ /dev/null @@ -1,26 +0,0 @@ -# Change History - -## Additive Changes - -### New Funcs - -1. BackupCertificateResult.MarshalJSON() ([]byte, error) -1. BackupKeyResult.MarshalJSON() ([]byte, error) -1. BackupSecretResult.MarshalJSON() ([]byte, error) -1. BackupStorageResult.MarshalJSON() ([]byte, error) -1. CertificateIssuerListResult.MarshalJSON() ([]byte, error) -1. CertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedCertificateListResult.MarshalJSON() ([]byte, error) -1. DeletedKeyListResult.MarshalJSON() ([]byte, error) -1. DeletedSasDefinitionListResult.MarshalJSON() ([]byte, error) -1. DeletedSecretListResult.MarshalJSON() ([]byte, error) -1. DeletedStorageListResult.MarshalJSON() ([]byte, error) -1. Error.MarshalJSON() ([]byte, error) -1. ErrorType.MarshalJSON() ([]byte, error) -1. KeyListResult.MarshalJSON() ([]byte, error) -1. KeyOperationResult.MarshalJSON() ([]byte, error) -1. KeyVerifyResult.MarshalJSON() ([]byte, error) -1. PendingCertificateSigningRequestResult.MarshalJSON() ([]byte, error) -1. SasDefinitionListResult.MarshalJSON() ([]byte, error) -1. SecretListResult.MarshalJSON() ([]byte, error) -1. StorageListResult.MarshalJSON() ([]byte, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go deleted file mode 100644 index c5589a020b4..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/client.go +++ /dev/null @@ -1,7313 +0,0 @@ -// Package keyvault implements the Azure ARM Keyvault service API version 7.0. -// -// The key vault client performs cryptographic key operations and vault operations against the Key Vault service. -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// BaseClient is the base client for Keyvault. -type BaseClient struct { - autorest.Client -} - -// New creates an instance of the BaseClient client. -func New() BaseClient { - return NewWithoutDefaults() -} - -// NewWithoutDefaults creates an instance of the BaseClient client. -func NewWithoutDefaults() BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - } -} - -// BackupCertificate requests that a backup of the specified certificate be downloaded to the client. All versions of -// the certificate will be downloaded. This operation requires the certificates/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
-func (client BaseClient) BackupCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result BackupCertificateResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.BackupCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure sending request") - return - } - - result, err = client.BackupCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupCertificate", resp, "Failure responding to request") - return - } - - return -} - -// BackupCertificatePreparer prepares the BackupCertificate request. -func (client BaseClient) BackupCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupCertificateSender sends the BackupCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupCertificateResponder handles the response to the BackupCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupCertificateResponder(resp *http.Response) (result BackupCertificateResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupKey the Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this operation -// does NOT return key material in a form that can be used outside the Azure Key Vault system, the returned key -// material is either protected to a Azure Key Vault HSM or to Azure Key Vault itself. The intent of this operation is -// to allow a client to GENERATE a key in one Azure Key Vault instance, BACKUP the key, and then RESTORE it into -// another Azure Key Vault instance. The BACKUP operation may be used to export, in protected form, any key type from -// Azure Key Vault. Individual versions of a key cannot be backed up. 
BACKUP / RESTORE can be performed within -// geographical boundaries only; meaning that a BACKUP from one geographical area cannot be restored to another -// geographical area. For example, a backup from the US geographical area cannot be restored in an EU geographical -// area. This operation requires the key/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -func (client BaseClient) BackupKey(ctx context.Context, vaultBaseURL string, keyName string) (result BackupKeyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", nil, "Failure preparing request") - return - } - - resp, err := client.BackupKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure sending request") - return - } - - result, err = client.BackupKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupKey", resp, "Failure responding to request") - return - } - - return -} - -// BackupKeyPreparer prepares the BackupKey request. -func (client BaseClient) BackupKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupKeySender sends the BackupKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupKeyResponder handles the response to the BackupKey request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupKeyResponder(resp *http.Response) (result BackupKeyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupSecret requests that a backup of the specified secret be downloaded to the client. All versions of the secret -// will be downloaded. This operation requires the secrets/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
-func (client BaseClient) BackupSecret(ctx context.Context, vaultBaseURL string, secretName string) (result BackupSecretResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", nil, "Failure preparing request") - return - } - - resp, err := client.BackupSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure sending request") - return - } - - result, err = client.BackupSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupSecret", resp, "Failure responding to request") - return - } - - return -} - -// BackupSecretPreparer prepares the BackupSecret request. -func (client BaseClient) BackupSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupSecretSender sends the BackupSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupSecretResponder handles the response to the BackupSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupSecretResponder(resp *http.Response) (result BackupSecretResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// BackupStorageAccount requests that a backup of the specified storage account be downloaded to the client. This -// operation requires the storage/backup permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) BackupStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result BackupStorageResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.BackupStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.BackupStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.BackupStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.BackupStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "BackupStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// BackupStorageAccountPreparer prepares the BackupStorageAccount request. -func (client BaseClient) BackupStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/backup", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// BackupStorageAccountSender sends the BackupStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) BackupStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// BackupStorageAccountResponder handles the response to the BackupStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) BackupStorageAccountResponder(resp *http.Response) (result BackupStorageResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateCertificate if this is the first version, the certificate resource is created. This operation requires the -// certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to create a certificate. 
-func (client BaseClient) CreateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateCertificate", err.Error()) - } - - req, err := client.CreateCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure sending request") - return - } - - result, err = client.CreateCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateCertificate", resp, "Failure responding to request") - return - } - - return -} - -// CreateCertificatePreparer prepares the CreateCertificate request. -func (client BaseClient) CreateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateCertificateSender sends the CreateCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) CreateCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateCertificateResponder handles the response to the CreateCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateCertificateResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// CreateKey the create key operation can be used to create any key type in Azure Key Vault. If the named key already -// exists, Azure Key Vault creates a new version of the key. It requires the keys/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name for the new key. The system will generate the version name for the new key. -// parameters - the parameters to create a key. -func (client BaseClient) CreateKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.CreateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "CreateKey", err.Error()) - } - - req, err := client.CreateKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", nil, "Failure preparing request") - return - } - - resp, err := client.CreateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure sending request") - return - } - - result, err = client.CreateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "CreateKey", resp, "Failure responding to request") - return - } - - return -} - -// CreateKeyPreparer prepares the CreateKey request. -func (client BaseClient) CreateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyCreateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/create", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateKeySender sends the CreateKey request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) CreateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateKeyResponder handles the response to the CreateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) CreateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Decrypt the DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption key and -// specified algorithm. This operation is the reverse of the ENCRYPT operation; only a single block of data may be -// decrypted, the size of this block is dependent on the target key and the algorithm to be used. The DECRYPT operation -// applies to asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of the key. -// This operation requires the keys/decrypt permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the decryption operation. -func (client BaseClient) Decrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Decrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Decrypt", err.Error()) - } - - req, err := client.DecryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", nil, "Failure preparing request") - return - } - - resp, err := client.DecryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure sending request") - return - } - - result, err = client.DecryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Decrypt", resp, "Failure responding to request") - return - } - - return -} - -// DecryptPreparer prepares the Decrypt request. 
-func (client BaseClient) DecryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/decrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DecryptSender sends the Decrypt request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DecryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DecryptResponder handles the response to the Decrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) DecryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificate deletes all versions of a certificate object along with its associated policy. Delete certificate -// cannot be used to remove individual versions of a certificate object. This operation requires the -// certificates/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificate", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificatePreparer prepares the DeleteCertificate request. 
-func (client BaseClient) DeleteCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateSender sends the DeleteCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateResponder handles the response to the DeleteCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateContacts deletes the certificate contacts for a specified key vault certificate. This operation -// requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) DeleteCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateContactsPreparer prepares the DeleteCertificateContacts request. 
-func (client BaseClient) DeleteCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateContactsSender sends the DeleteCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateContactsResponder handles the response to the DeleteCertificateContacts request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateIssuer the DeleteCertificateIssuer operation permanently removes the specified certificate issuer -// from the vault. This operation requires the certificates/manageissuers/deleteissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) DeleteCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateIssuerPreparer prepares the DeleteCertificateIssuer request. 
-func (client BaseClient) DeleteCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateIssuerSender sends the DeleteCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateIssuerResponder handles the response to the DeleteCertificateIssuer request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteCertificateOperation deletes the creation operation for a specified certificate that is in the process of -// being created. The certificate is no longer created. This operation requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -func (client BaseClient) DeleteCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.DeleteCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// DeleteCertificateOperationPreparer prepares the DeleteCertificateOperation request. 
-func (client BaseClient) DeleteCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteCertificateOperationSender sends the DeleteCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteCertificateOperationResponder handles the response to the DeleteCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteKey the delete key operation cannot be used to remove individual versions of a key. This operation removes the -// cryptographic material associated with the key, which means the key is not usable for Sign/Verify, Wrap/Unwrap or -// Encrypt/Decrypt operations. This operation requires the keys/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key to delete. -func (client BaseClient) DeleteKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure sending request") - return - } - - result, err = client.DeleteKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteKey", resp, "Failure responding to request") - return - } - - return -} - -// DeleteKeyPreparer prepares the DeleteKey request. 
-func (client BaseClient) DeleteKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteKeySender sends the DeleteKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteKeyResponder handles the response to the DeleteKey request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSasDefinition deletes a SAS definition from a specified storage account. This operation requires the -// storage/deletesas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
-func (client BaseClient) DeleteSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteSasDefinition", err.Error()) - } - - req, err := client.DeleteSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.DeleteSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSasDefinitionPreparer prepares the DeleteSasDefinition request. -func (client BaseClient) DeleteSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSasDefinitionSender sends the DeleteSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSasDefinitionResponder handles the response to the DeleteSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) DeleteSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteSecret the DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied to an -// individual version of a secret. This operation requires the secrets/delete permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -func (client BaseClient) DeleteSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeleteSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure sending request") - return - } - - result, err = client.DeleteSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteSecret", resp, "Failure responding to request") - return - } - - return -} - -// DeleteSecretPreparer prepares the DeleteSecret request. -func (client BaseClient) DeleteSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSecretSender sends the DeleteSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteSecretResponder handles the response to the DeleteSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// DeleteStorageAccount deletes a storage account. This operation requires the storage/delete permission. 
-// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -func (client BaseClient) DeleteStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.DeleteStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "DeleteStorageAccount", err.Error()) - } - - req, err := client.DeleteStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.DeleteStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "DeleteStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// DeleteStorageAccountPreparer prepares the DeleteStorageAccount request. -func (client BaseClient) DeleteStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteStorageAccountSender sends the DeleteStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) DeleteStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteStorageAccountResponder handles the response to the DeleteStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) DeleteStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Encrypt the ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is stored in -// Azure Key Vault. 
Note that the ENCRYPT operation only supports a single block of data, the size of which is -// dependent on the target key and the encryption algorithm to be used. The ENCRYPT operation is only strictly -// necessary for symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be performed -// using public portion of the key. This operation is supported for asymmetric keys as a convenience for callers that -// have a key-reference but do not have access to the public key material. This operation requires the keys/encrypt -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for the encryption operation. -func (client BaseClient) Encrypt(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Encrypt") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Encrypt", err.Error()) - } - - req, err := client.EncryptPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", nil, "Failure preparing request") - return - } - - resp, err := client.EncryptSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure sending request") - return - } - - result, err = client.EncryptResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Encrypt", resp, "Failure responding to request") - return - } - - return -} - -// EncryptPreparer prepares the Encrypt request. -func (client BaseClient) EncryptPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/encrypt", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EncryptSender sends the Encrypt request. The method will close the -// http.Response Body if it receives an error. 
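// Illustrative call to the Encrypt method above; not from the vendored file.
// Only the signature and the non-nil-Value validation are grounded in this
// diff; the Algorithm field, the RSAOAEP256 constant, and the base64url
// encoding of the plaintext are assumptions about the package's models.
//
//	value := base64.RawURLEncoding.EncodeToString(plaintext) // import "encoding/base64"
//	params := keyvault.KeyOperationsParameters{
//		Algorithm: keyvault.RSAOAEP256, // assumed constant name
//		Value:     &value,              // a nil Value fails the validation above
//	}
//	result, err := client.Encrypt(ctx, vaultURL, "my-key", keyVersion, params)
//	// keyVersion is assumed to come from the target key's bundle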
-func (client BaseClient) EncryptSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// EncryptResponder handles the response to the Encrypt request. The method always -// closes the http.Response Body. -func (client BaseClient) EncryptResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificate gets information about a specific certificate. This operation requires the certificates/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificateVersion - the version of the certificate. This URI fragment is optional. If not specified, the -// latest version of the certificate is returned. -func (client BaseClient) GetCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePreparer prepares the GetCertificate request. -func (client BaseClient) GetCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - "certificate-version": autorest.Encode("path", certificateVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateSender sends the GetCertificate request. The method will close the -// http.Response Body if it receives an error. 
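// Illustrative call to GetCertificate above; not from the vendored file. The
// empty certificateVersion follows the method's own doc comment: the latest
// version of the certificate is returned. client and vaultURL are assumed to
// be configured as in the earlier sketch.
//
//	cert, err := client.GetCertificate(ctx, vaultURL, "my-cert", "")
//	if err != nil {
//		return err
//	}
//	_ = cert // CertificateBundle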
-func (client BaseClient) GetCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateResponder handles the response to the GetCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateContacts the GetCertificateContacts operation returns the set of certificate contact resources in the -// specified key vault. This operation requires the certificates/managecontacts permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -func (client BaseClient) GetCertificateContacts(ctx context.Context, vaultBaseURL string) (result Contacts, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateContacts") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateContactsPreparer(ctx, vaultBaseURL) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateContactsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateContactsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateContacts", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateContactsPreparer prepares the GetCertificateContacts request. -func (client BaseClient) GetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/contacts"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateContactsSender sends the GetCertificateContacts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateContactsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateContactsResponder handles the response to the GetCertificateContacts request. The method always -// closes the http.Response Body. 
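// Illustrative call to GetCertificateContacts above; not from the vendored
// file. Contacts are a single vault-wide resource, so only the vault URL is
// needed.
//
//	contacts, err := client.GetCertificateContacts(ctx, "https://myvault.vault.azure.net")
//	if err != nil {
//		return err // requires certificates/managecontacts, per the doc comment above
//	}
//	_ = contacts // Contacts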
-func (client BaseClient) GetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuer the GetCertificateIssuer operation returns the specified certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// issuerName - the name of the issuer. -func (client BaseClient) GetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string) (result IssuerBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuer") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuerSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateIssuerResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuer", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateIssuerPreparer prepares the GetCertificateIssuer request. -func (client BaseClient) GetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "issuer-name": autorest.Encode("path", issuerName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuerSender sends the GetCertificateIssuer request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuerSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuerResponder handles the response to the GetCertificateIssuer request. The method always -// closes the http.Response Body. 
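// Illustrative call to GetCertificateIssuer above; not from the vendored file.
//
//	issuer, err := client.GetCertificateIssuer(ctx, vaultURL, "my-issuer")
//	_ = issuer // IssuerBundle; requires certificates/manageissuers/getissuers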
-func (client BaseClient) GetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificateIssuers the GetCertificateIssuers operation returns the set of certificate issuer resources in the -// specified key vault. This operation requires the certificates/manageissuers/getissuers permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -func (client BaseClient) GetCertificateIssuers(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.cilr.Response.Response != nil { - sc = result.cilr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateIssuers", err.Error()) - } - - result.fn = client.getCertificateIssuersNextResults - req, err := client.GetCertificateIssuersPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.cilr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure sending request") - return - } - - result.cilr, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateIssuers", resp, "Failure responding to request") - return - } - if result.cilr.hasNextLink() && result.cilr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateIssuersPreparer prepares the GetCertificateIssuers request. -func (client BaseClient) GetCertificateIssuersPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates/issuers"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateIssuersSender sends the GetCertificateIssuers request. 
The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateIssuersSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateIssuersResponder handles the response to the GetCertificateIssuers request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateIssuersResponder(resp *http.Response) (result CertificateIssuerListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateIssuersNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateIssuersNextResults(ctx context.Context, lastResults CertificateIssuerListResult) (result CertificateIssuerListResult, err error) { - req, err := lastResults.certificateIssuerListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateIssuersSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateIssuersResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateIssuersNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateIssuersComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateIssuersComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result CertificateIssuerListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateIssuers") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateIssuers(ctx, vaultBaseURL, maxresults) - return -} - -// GetCertificateOperation gets the creation operation associated with a specified certificate. This operation requires -// the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. 
-func (client BaseClient) GetCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateOperation, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateOperation") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificateOperationPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateOperationSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure sending request") - return - } - - result, err = client.GetCertificateOperationResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateOperation", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificateOperationPreparer prepares the GetCertificateOperation request. -func (client BaseClient) GetCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateOperationSender sends the GetCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateOperationResponder handles the response to the GetCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificatePolicy the GetCertificatePolicy operation returns the specified certificate policy resources in the -// specified key vault. This operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in a given key vault. 
-func (client BaseClient) GetCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.GetCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// GetCertificatePolicyPreparer prepares the GetCertificatePolicy request. -func (client BaseClient) GetCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatePolicySender sends the GetCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatePolicyResponder handles the response to the GetCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) GetCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetCertificates the GetCertificates operation returns the set of certificates resources in the specified key vault. -// This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. -// includePending - specifies whether to include certificates which are not completely provisioned. 
-func (client BaseClient) GetCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificates", err.Error()) - } - - result.fn = client.getCertificatesNextResults - req, err := client.GetCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificates", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificatesPreparer prepares the GetCertificates request. -func (client BaseClient) GetCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/certificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificatesSender sends the GetCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificatesResponder handles the response to the GetCertificates request. The method always -// closes the http.Response Body. 
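// Illustrative page walk over GetCertificates above; not from the vendored
// file. NextWithContext appears in this diff; NotDone and Values are the usual
// methods on autorest-generated *Page types and are assumed here.
//
//	max := int32(25) // validated above to lie within [1,25]
//	page, err := client.GetCertificates(ctx, vaultURL, &max, nil)
//	for err == nil && page.NotDone() {
//		for _, item := range page.Values() {
//			_ = item // one certificate list entry per item
//		}
//		err = page.NextWithContext(ctx) // follows the nextLink, as in the paging code above
//	}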
-func (client BaseClient) GetCertificatesResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificatesNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetCertificateVersions the GetCertificateVersions operation returns the versions of a certificate in the specified -// key vault. This operation requires the certificates/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetCertificateVersions(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.clr.Response.Response != nil { - sc = result.clr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetCertificateVersions", err.Error()) - } - - result.fn = client.getCertificateVersionsNextResults - req, err := client.GetCertificateVersionsPreparer(ctx, vaultBaseURL, certificateName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.clr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure sending request") - return - } - - result.clr, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetCertificateVersions", resp, "Failure responding to request") - return - } - if result.clr.hasNextLink() && result.clr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetCertificateVersionsPreparer prepares the GetCertificateVersions request. -func (client BaseClient) GetCertificateVersionsPreparer(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetCertificateVersionsSender sends the GetCertificateVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetCertificateVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetCertificateVersionsResponder handles the response to the GetCertificateVersions request. The method always -// closes the http.Response Body. 
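// Illustrative call to GetCertificateVersions above; not from the vendored
// file. Passing nil for maxresults lets the service default apply (up to 25
// results per page, per the doc comment above).
//
//	versions, err := client.GetCertificateVersions(ctx, vaultURL, "my-cert", nil)
//	if err != nil {
//		return err
//	}
//	_ = versions // CertificateListResultPage; walk it as in the GetCertificates sketch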
-func (client BaseClient) GetCertificateVersionsResponder(resp *http.Response) (result CertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getCertificateVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getCertificateVersionsNextResults(ctx context.Context, lastResults CertificateListResult) (result CertificateListResult, err error) { - req, err := lastResults.certificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetCertificateVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetCertificateVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getCertificateVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetCertificateVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetCertificateVersionsComplete(ctx context.Context, vaultBaseURL string, certificateName string, maxresults *int32) (result CertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetCertificateVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetCertificateVersions(ctx, vaultBaseURL, certificateName, maxresults) - return -} - -// GetDeletedCertificate the GetDeletedCertificate operation retrieves the deleted certificate information plus its -// attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This -// operation requires the certificates/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// certificateName - the name of the certificate -func (client BaseClient) GetDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result DeletedCertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedCertificatePreparer prepares the GetDeletedCertificate request. -func (client BaseClient) GetDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificateSender sends the GetDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificateResponder handles the response to the GetDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedCertificateResponder(resp *http.Response) (result DeletedCertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedCertificates the GetDeletedCertificates operation retrieves the certificates in the current vault which -// are in a deleted state and ready for recovery or purging. This operation includes deletion-specific information. -// This operation requires the certificates/get/list permission. This operation can only be enabled on soft-delete -// enabled vaults. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-// includePending - specifies whether to include certificates which are not completely provisioned. -func (client BaseClient) GetDeletedCertificates(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.dclr.Response.Response != nil { - sc = result.dclr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedCertificates", err.Error()) - } - - result.fn = client.getDeletedCertificatesNextResults - req, err := client.GetDeletedCertificatesPreparer(ctx, vaultBaseURL, maxresults, includePending) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.dclr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure sending request") - return - } - - result.dclr, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedCertificates", resp, "Failure responding to request") - return - } - if result.dclr.hasNextLink() && result.dclr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedCertificatesPreparer prepares the GetDeletedCertificates request. -func (client BaseClient) GetDeletedCertificatesPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - if includePending != nil { - queryParameters["includePending"] = autorest.Encode("query", *includePending) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedcertificates"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedCertificatesSender sends the GetDeletedCertificates request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedCertificatesSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedCertificatesResponder handles the response to the GetDeletedCertificates request. The method always -// closes the http.Response Body. 
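// Illustrative iterator walk for deleted certificates; not from the vendored
// file. It uses the GetDeletedCertificatesComplete variant defined further
// below in this same removal; the NotDone/Value iterator methods are assumed
// from the usual autorest-generated iterator surface.
//
//	iter, err := client.GetDeletedCertificatesComplete(ctx, vaultURL, nil, nil)
//	for err == nil && iter.NotDone() {
//		_ = iter.Value() // one deleted-certificate entry (element type assumed)
//		err = iter.NextWithContext(ctx)
//	}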
-func (client BaseClient) GetDeletedCertificatesResponder(resp *http.Response) (result DeletedCertificateListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedCertificatesNextResults retrieves the next set of results, if any. -func (client BaseClient) getDeletedCertificatesNextResults(ctx context.Context, lastResults DeletedCertificateListResult) (result DeletedCertificateListResult, err error) { - req, err := lastResults.deletedCertificateListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetDeletedCertificatesSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure sending next results request") - } - result, err = client.GetDeletedCertificatesResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedCertificatesNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetDeletedCertificatesComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetDeletedCertificatesComplete(ctx context.Context, vaultBaseURL string, maxresults *int32, includePending *bool) (result DeletedCertificateListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedCertificates") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetDeletedCertificates(ctx, vaultBaseURL, maxresults, includePending) - return -} - -// GetDeletedKey the Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation can be -// invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation requires -// the keys/get permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. 
-func (client BaseClient) GetDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result DeletedKeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.GetDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// GetDeletedKeyPreparer prepares the GetDeletedKey request. -func (client BaseClient) GetDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeySender sends the GetDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeyResponder handles the response to the GetDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeyResponder(resp *http.Response) (result DeletedKeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetDeletedKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part -// of a deleted key. This operation includes deletion-specific information. The Get Deleted Keys operation is -// applicable for vaults enabled for soft-delete. While the operation can be invoked on any vault, it will return an -// error if invoked on a non soft-delete enabled vault. This operation requires the keys/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
-func (client BaseClient) GetDeletedKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys") - defer func() { - sc := -1 - if result.dklr.Response.Response != nil { - sc = result.dklr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetDeletedKeys", err.Error()) - } - - result.fn = client.getDeletedKeysNextResults - req, err := client.GetDeletedKeysPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", nil, "Failure preparing request") - return - } - - resp, err := client.GetDeletedKeysSender(req) - if err != nil { - result.dklr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure sending request") - return - } - - result.dklr, err = client.GetDeletedKeysResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedKeys", resp, "Failure responding to request") - return - } - if result.dklr.hasNextLink() && result.dklr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetDeletedKeysPreparer prepares the GetDeletedKeys request. -func (client BaseClient) GetDeletedKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/deletedkeys"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetDeletedKeysSender sends the GetDeletedKeys request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetDeletedKeysSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetDeletedKeysResponder handles the response to the GetDeletedKeys request. The method always -// closes the http.Response Body. -func (client BaseClient) GetDeletedKeysResponder(resp *http.Response) (result DeletedKeyListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getDeletedKeysNextResults retrieves the next set of results, if any. 
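// Illustrative call to GetDeletedKeys above; not from the vendored file.
//
//	max := int32(10) // must stay within [1,25], per the validation above
//	deleted, err := client.GetDeletedKeys(ctx, vaultURL, &max)
//	_ = deleted // DeletedKeyListResultPage; the vault must be soft-delete enabled,
//	            // per the doc comment above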
-func (client BaseClient) getDeletedKeysNextResults(ctx context.Context, lastResults DeletedKeyListResult) (result DeletedKeyListResult, err error) {
-	req, err := lastResults.deletedKeyListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetDeletedKeysSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetDeletedKeysResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedKeysNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetDeletedKeysComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetDeletedKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedKeyListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedKeys")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetDeletedKeys(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// GetDeletedSasDefinition the Get Deleted SAS Definition operation returns the specified deleted SAS definition along
-// with its attributes. This operation requires the storage/getsas permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// sasDefinitionName - the name of the SAS definition.
-func (client BaseClient) GetDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result DeletedSasDefinitionBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinition")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: sasDefinitionName,
-			Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinition", err.Error())
-	}
-
-	req, err := client.GetDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedSasDefinitionSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetDeletedSasDefinitionResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinition", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetDeletedSasDefinitionPreparer prepares the GetDeletedSasDefinition request.
-func (client BaseClient) GetDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"sas-definition-name":  autorest.Encode("path", sasDefinitionName),
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedSasDefinitionSender sends the GetDeletedSasDefinition request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedSasDefinitionResponder handles the response to the GetDeletedSasDefinition request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedSasDefinitionResponder(resp *http.Response) (result DeletedSasDefinitionBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetDeletedSasDefinitions the Get Deleted Sas Definitions operation returns the SAS definitions that have been
-// deleted for a vault enabled for soft-delete. This operation requires the storage/listsas permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetDeletedSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions")
-		defer func() {
-			sc := -1
-			if result.dsdlr.Response.Response != nil {
-				sc = result.dsdlr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetDeletedSasDefinitions", err.Error())
-	}
-
-	result.fn = client.getDeletedSasDefinitionsNextResults
-	req, err := client.GetDeletedSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedSasDefinitionsSender(req)
-	if err != nil {
-		result.dsdlr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure sending request")
-		return
-	}
-
-	result.dsdlr, err = client.GetDeletedSasDefinitionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSasDefinitions", resp, "Failure responding to request")
-		return
-	}
-	if result.dsdlr.hasNextLink() && result.dsdlr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetDeletedSasDefinitionsPreparer prepares the GetDeletedSasDefinitions request.
-func (client BaseClient) GetDeletedSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedSasDefinitionsSender sends the GetDeletedSasDefinitions request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedSasDefinitionsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedSasDefinitionsResponder handles the response to the GetDeletedSasDefinitions request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedSasDefinitionsResponder(resp *http.Response) (result DeletedSasDefinitionListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getDeletedSasDefinitionsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getDeletedSasDefinitionsNextResults(ctx context.Context, lastResults DeletedSasDefinitionListResult) (result DeletedSasDefinitionListResult, err error) {
-	req, err := lastResults.deletedSasDefinitionListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetDeletedSasDefinitionsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetDeletedSasDefinitionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSasDefinitionsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetDeletedSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetDeletedSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result DeletedSasDefinitionListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSasDefinitions")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetDeletedSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults)
-	return
-}
-
-// GetDeletedSecret the Get Deleted Secret operation returns the specified deleted secret along with its attributes.
-// This operation requires the secrets/get permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// secretName - the name of the secret.
-func (client BaseClient) GetDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result DeletedSecretBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecret")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.GetDeletedSecretPreparer(ctx, vaultBaseURL, secretName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedSecretSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetDeletedSecretResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecret", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetDeletedSecretPreparer prepares the GetDeletedSecret request.
-func (client BaseClient) GetDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"secret-name": autorest.Encode("path", secretName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedSecretSender sends the GetDeletedSecret request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedSecretSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedSecretResponder handles the response to the GetDeletedSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedSecretResponder(resp *http.Response) (result DeletedSecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetDeletedSecrets the Get Deleted Secrets operation returns the secrets that have been deleted for a vault enabled
-// for soft-delete. This operation requires the secrets/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetDeletedSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets")
-		defer func() {
-			sc := -1
-			if result.dslr.Response.Response != nil {
-				sc = result.dslr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetDeletedSecrets", err.Error())
-	}
-
-	result.fn = client.getDeletedSecretsNextResults
-	req, err := client.GetDeletedSecretsPreparer(ctx, vaultBaseURL, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedSecretsSender(req)
-	if err != nil {
-		result.dslr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure sending request")
-		return
-	}
-
-	result.dslr, err = client.GetDeletedSecretsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedSecrets", resp, "Failure responding to request")
-		return
-	}
-	if result.dslr.hasNextLink() && result.dslr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetDeletedSecretsPreparer prepares the GetDeletedSecrets request.
-func (client BaseClient) GetDeletedSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/deletedsecrets"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedSecretsSender sends the GetDeletedSecrets request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedSecretsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedSecretsResponder handles the response to the GetDeletedSecrets request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedSecretsResponder(resp *http.Response) (result DeletedSecretListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getDeletedSecretsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getDeletedSecretsNextResults(ctx context.Context, lastResults DeletedSecretListResult) (result DeletedSecretListResult, err error) {
-	req, err := lastResults.deletedSecretListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetDeletedSecretsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetDeletedSecretsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedSecretsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetDeletedSecretsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetDeletedSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedSecretListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedSecrets")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetDeletedSecrets(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// GetDeletedStorageAccount the Get Deleted Storage Account operation returns the specified deleted storage account
-// along with its attributes. This operation requires the storage/get permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-func (client BaseClient) GetDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result DeletedStorageBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccount")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccount", err.Error())
-	}
-
-	req, err := client.GetDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedStorageAccountSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetDeletedStorageAccountResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccount", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetDeletedStorageAccountPreparer prepares the GetDeletedStorageAccount request.
-func (client BaseClient) GetDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedStorageAccountSender sends the GetDeletedStorageAccount request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedStorageAccountSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedStorageAccountResponder handles the response to the GetDeletedStorageAccount request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedStorageAccountResponder(resp *http.Response) (result DeletedStorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetDeletedStorageAccounts the Get Deleted Storage Accounts operation returns the storage accounts that have been
-// deleted for a vault enabled for soft-delete. This operation requires the storage/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetDeletedStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts")
-		defer func() {
-			sc := -1
-			if result.dslr.Response.Response != nil {
-				sc = result.dslr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetDeletedStorageAccounts", err.Error())
-	}
-
-	result.fn = client.getDeletedStorageAccountsNextResults
-	req, err := client.GetDeletedStorageAccountsPreparer(ctx, vaultBaseURL, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetDeletedStorageAccountsSender(req)
-	if err != nil {
-		result.dslr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure sending request")
-		return
-	}
-
-	result.dslr, err = client.GetDeletedStorageAccountsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetDeletedStorageAccounts", resp, "Failure responding to request")
-		return
-	}
-	if result.dslr.hasNextLink() && result.dslr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetDeletedStorageAccountsPreparer prepares the GetDeletedStorageAccounts request.
-func (client BaseClient) GetDeletedStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/deletedstorage"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetDeletedStorageAccountsSender sends the GetDeletedStorageAccounts request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetDeletedStorageAccountsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetDeletedStorageAccountsResponder handles the response to the GetDeletedStorageAccounts request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetDeletedStorageAccountsResponder(resp *http.Response) (result DeletedStorageListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getDeletedStorageAccountsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getDeletedStorageAccountsNextResults(ctx context.Context, lastResults DeletedStorageListResult) (result DeletedStorageListResult, err error) {
-	req, err := lastResults.deletedStorageListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetDeletedStorageAccountsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetDeletedStorageAccountsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getDeletedStorageAccountsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetDeletedStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetDeletedStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result DeletedStorageListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetDeletedStorageAccounts")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetDeletedStorageAccounts(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// GetKey the get key operation is applicable to all key types. If the requested key is symmetric, then no key material
-// is released in the response. This operation requires the keys/get permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// keyName - the name of the key to get.
-// keyVersion - adding the version parameter retrieves a specific version of a key. This URI fragment is
-// optional. If not specified, the latest version of the key is returned.
-func (client BaseClient) GetKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (result KeyBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKey")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.GetKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetKeySender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetKeyResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKey", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetKeyPreparer prepares the GetKey request.
-func (client BaseClient) GetKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"key-name":    autorest.Encode("path", keyName),
-		"key-version": autorest.Encode("path", keyVersion),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetKeySender sends the GetKey request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetKeySender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetKeyResponder handles the response to the GetKey request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetKeyResponder(resp *http.Response) (result KeyBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetKeys retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the public part of a
-// stored key. The LIST operation is applicable to all key types, however only the base key identifier, attributes, and
-// tags are provided in the response. Individual versions of a key are not listed in the response. This operation
-// requires the keys/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetKeys(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys")
-		defer func() {
-			sc := -1
-			if result.klr.Response.Response != nil {
-				sc = result.klr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetKeys", err.Error())
-	}
-
-	result.fn = client.getKeysNextResults
-	req, err := client.GetKeysPreparer(ctx, vaultBaseURL, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetKeysSender(req)
-	if err != nil {
-		result.klr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure sending request")
-		return
-	}
-
-	result.klr, err = client.GetKeysResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeys", resp, "Failure responding to request")
-		return
-	}
-	if result.klr.hasNextLink() && result.klr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetKeysPreparer prepares the GetKeys request.
-func (client BaseClient) GetKeysPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/keys"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetKeysSender sends the GetKeys request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetKeysSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetKeysResponder handles the response to the GetKeys request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetKeysResponder(resp *http.Response) (result KeyListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getKeysNextResults retrieves the next set of results, if any.
-func (client BaseClient) getKeysNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) {
-	req, err := lastResults.keyListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetKeysSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetKeysResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeysNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetKeysComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetKeysComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result KeyListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeys")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetKeys(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// GetKeyVersions the full key identifier, attributes, and tags are provided in the response. This operation requires
-// the keys/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// keyName - the name of the key.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetKeyVersions(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions")
-		defer func() {
-			sc := -1
-			if result.klr.Response.Response != nil {
-				sc = result.klr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetKeyVersions", err.Error())
-	}
-
-	result.fn = client.getKeyVersionsNextResults
-	req, err := client.GetKeyVersionsPreparer(ctx, vaultBaseURL, keyName, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetKeyVersionsSender(req)
-	if err != nil {
-		result.klr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure sending request")
-		return
-	}
-
-	result.klr, err = client.GetKeyVersionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetKeyVersions", resp, "Failure responding to request")
-		return
-	}
-	if result.klr.hasNextLink() && result.klr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetKeyVersionsPreparer prepares the GetKeyVersions request.
-func (client BaseClient) GetKeyVersionsPreparer(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"key-name": autorest.Encode("path", keyName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/keys/{key-name}/versions", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetKeyVersionsSender sends the GetKeyVersions request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetKeyVersionsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetKeyVersionsResponder handles the response to the GetKeyVersions request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetKeyVersionsResponder(resp *http.Response) (result KeyListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getKeyVersionsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getKeyVersionsNextResults(ctx context.Context, lastResults KeyListResult) (result KeyListResult, err error) {
-	req, err := lastResults.keyListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetKeyVersionsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetKeyVersionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getKeyVersionsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetKeyVersionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetKeyVersionsComplete(ctx context.Context, vaultBaseURL string, keyName string, maxresults *int32) (result KeyListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetKeyVersions")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetKeyVersions(ctx, vaultBaseURL, keyName, maxresults)
-	return
-}
-
-// GetSasDefinition gets information about a SAS definition for the specified storage account. This operation requires
-// the storage/getsas permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// sasDefinitionName - the name of the SAS definition.
-func (client BaseClient) GetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinition")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: sasDefinitionName,
-			Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetSasDefinition", err.Error())
-	}
-
-	req, err := client.GetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSasDefinitionSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetSasDefinitionResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinition", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetSasDefinitionPreparer prepares the GetSasDefinition request.
-func (client BaseClient) GetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"sas-definition-name":  autorest.Encode("path", sasDefinitionName),
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSasDefinitionSender sends the GetSasDefinition request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSasDefinitionSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSasDefinitionResponder handles the response to the GetSasDefinition request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetSasDefinitions list storage SAS definitions for the given storage account. This operation requires the
-// storage/listsas permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// maxresults - maximum number of results to return in a page. If not specified the service will return up to
-// 25 results.
-func (client BaseClient) GetSasDefinitions(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions")
-		defer func() {
-			sc := -1
-			if result.sdlr.Response.Response != nil {
-				sc = result.sdlr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetSasDefinitions", err.Error())
-	}
-
-	result.fn = client.getSasDefinitionsNextResults
-	req, err := client.GetSasDefinitionsPreparer(ctx, vaultBaseURL, storageAccountName, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSasDefinitionsSender(req)
-	if err != nil {
-		result.sdlr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure sending request")
-		return
-	}
-
-	result.sdlr, err = client.GetSasDefinitionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSasDefinitions", resp, "Failure responding to request")
-		return
-	}
-	if result.sdlr.hasNextLink() && result.sdlr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetSasDefinitionsPreparer prepares the GetSasDefinitions request.
-func (client BaseClient) GetSasDefinitionsPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/sas", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSasDefinitionsSender sends the GetSasDefinitions request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSasDefinitionsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSasDefinitionsResponder handles the response to the GetSasDefinitions request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSasDefinitionsResponder(resp *http.Response) (result SasDefinitionListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getSasDefinitionsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getSasDefinitionsNextResults(ctx context.Context, lastResults SasDefinitionListResult) (result SasDefinitionListResult, err error) {
-	req, err := lastResults.sasDefinitionListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetSasDefinitionsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetSasDefinitionsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSasDefinitionsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetSasDefinitionsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetSasDefinitionsComplete(ctx context.Context, vaultBaseURL string, storageAccountName string, maxresults *int32) (result SasDefinitionListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSasDefinitions")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetSasDefinitions(ctx, vaultBaseURL, storageAccountName, maxresults)
-	return
-}
-
-// GetSecret the GET operation is applicable to any secret stored in Azure Key Vault. This operation requires the
-// secrets/get permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// secretName - the name of the secret.
-// secretVersion - the version of the secret. This URI fragment is optional. If not specified, the latest
-// version of the secret is returned.
-func (client BaseClient) GetSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (result SecretBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecret")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.GetSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSecretSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.GetSecretResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecret", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// GetSecretPreparer prepares the GetSecret request.
-func (client BaseClient) GetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"secret-name":    autorest.Encode("path", secretName),
-		"secret-version": autorest.Encode("path", secretVersion),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSecretSender sends the GetSecret request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSecretSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSecretResponder handles the response to the GetSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSecretResponder(resp *http.Response) (result SecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// GetSecrets the Get Secrets operation is applicable to the entire vault. However, only the base secret identifier and
-// its attributes are provided in the response. Individual secret versions are not listed in the response. This
-// operation requires the secrets/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// maxresults - maximum number of results to return in a page. If not specified, the service will return up to
-// 25 results.
-func (client BaseClient) GetSecrets(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultPage, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets")
-		defer func() {
-			sc := -1
-			if result.slr.Response.Response != nil {
-				sc = result.slr.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: maxresults,
-			Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false,
-				Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil},
-					{Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
-				}}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "GetSecrets", err.Error())
-	}
-
-	result.fn = client.getSecretsNextResults
-	req, err := client.GetSecretsPreparer(ctx, vaultBaseURL, maxresults)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.GetSecretsSender(req)
-	if err != nil {
-		result.slr.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure sending request")
-		return
-	}
-
-	result.slr, err = client.GetSecretsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecrets", resp, "Failure responding to request")
-		return
-	}
-	if result.slr.hasNextLink() && result.slr.IsEmpty() {
-		err = result.NextWithContext(ctx)
-		return
-	}
-
-	return
-}
-
-// GetSecretsPreparer prepares the GetSecrets request.
-func (client BaseClient) GetSecretsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-	if maxresults != nil {
-		queryParameters["maxresults"] = autorest.Encode("query", *maxresults)
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/secrets"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// GetSecretsSender sends the GetSecrets request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) GetSecretsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// GetSecretsResponder handles the response to the GetSecrets request. The method always
-// closes the http.Response Body.
-func (client BaseClient) GetSecretsResponder(resp *http.Response) (result SecretListResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// getSecretsNextResults retrieves the next set of results, if any.
-func (client BaseClient) getSecretsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) {
-	req, err := lastResults.secretListResultPreparer(ctx)
-	if err != nil {
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", nil, "Failure preparing next results request")
-	}
-	if req == nil {
-		return
-	}
-	resp, err := client.GetSecretsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure sending next results request")
-	}
-	result, err = client.GetSecretsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretsNextResults", resp, "Failure responding to next results request")
-	}
-	return
-}
-
-// GetSecretsComplete enumerates all values, automatically crossing page boundaries as required.
-func (client BaseClient) GetSecretsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result SecretListResultIterator, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecrets")
-		defer func() {
-			sc := -1
-			if result.Response().Response.Response != nil {
-				sc = result.page.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	result.page, err = client.GetSecrets(ctx, vaultBaseURL, maxresults)
-	return
-}
-
-// GetSecretVersions the full secret identifier and attributes are provided in the response. No values are returned for
-// the secrets. This operation requires the secrets/list permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// secretName - the name of the secret.
-// maxresults - maximum number of results to return in a page. If not specified, the service will return up to
-// 25 results.
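For orientation, the Complete variant above wraps the paged GetSecrets call in an iterator that crosses page boundaries on demand. A minimal consumption sketch (not part of the vendored file; the vault URL is a placeholder, and kvauth.NewAuthorizerFromEnvironment comes from the SDK's keyvault/auth helper package; later sketches reuse ctx, vaultURL and client from here). The GetSecretVersions implementation documented above follows.

	package main

	import (
		"context"
		"fmt"
		"log"

		kvauth "github.com/Azure/azure-sdk-for-go/services/keyvault/auth"
		"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault"
	)

	func main() {
		ctx := context.Background()
		vaultURL := "https://myvault.vault.azure.net" // placeholder vault
		client := keyvault.New()
		authorizer, err := kvauth.NewAuthorizerFromEnvironment()
		if err != nil {
			log.Fatal(err)
		}
		client.Authorizer = authorizer

		// nil maxresults lets the service choose the page size (at most 25 items per page).
		it, err := client.GetSecretsComplete(ctx, vaultURL, nil)
		for ; err == nil && it.NotDone(); err = it.NextWithContext(ctx) {
			fmt.Println(*it.Value().ID) // base identifiers only; secret values are never listed
		}
		if err != nil {
			log.Fatal(err)
		}
	}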
-func (client BaseClient) GetSecretVersions(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetSecretVersions", err.Error()) - } - - result.fn = client.getSecretVersionsNextResults - req, err := client.GetSecretVersionsPreparer(ctx, vaultBaseURL, secretName, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", nil, "Failure preparing request") - return - } - - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure sending request") - return - } - - result.slr, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetSecretVersions", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetSecretVersionsPreparer prepares the GetSecretVersions request. -func (client BaseClient) GetSecretVersionsPreparer(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/versions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSecretVersionsSender sends the GetSecretVersions request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetSecretVersionsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetSecretVersionsResponder handles the response to the GetSecretVersions request. The method always -// closes the http.Response Body. 
-func (client BaseClient) GetSecretVersionsResponder(resp *http.Response) (result SecretListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getSecretVersionsNextResults retrieves the next set of results, if any. -func (client BaseClient) getSecretVersionsNextResults(ctx context.Context, lastResults SecretListResult) (result SecretListResult, err error) { - req, err := lastResults.secretListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetSecretVersionsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetSecretVersionsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getSecretVersionsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetSecretVersionsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetSecretVersionsComplete(ctx context.Context, vaultBaseURL string, secretName string, maxresults *int32) (result SecretListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetSecretVersions") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetSecretVersions(ctx, vaultBaseURL, secretName, maxresults) - return -} - -// GetStorageAccount gets information about a specified storage account. This operation requires the storage/get -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
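The version pager behaves identically: items carry per-version identifiers and attributes but never values. A short continuation of the earlier sketch ("db-password" is a hypothetical secret name; the GetStorageAccount implementation documented above resumes below):

	// Enumerate every version of a single secret, pages fetched on demand.
	it, err := client.GetSecretVersionsComplete(ctx, vaultURL, "db-password", nil)
	for ; err == nil && it.NotDone(); err = it.NextWithContext(ctx) {
		fmt.Println(*it.Value().ID) // e.g. .../secrets/db-password/<version>
	}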
-func (client BaseClient) GetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccount", err.Error()) - } - - req, err := client.GetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.GetStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// GetStorageAccountPreparer prepares the GetStorageAccount request. -func (client BaseClient) GetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountSender sends the GetStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountResponder handles the response to the GetStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// GetStorageAccounts list storage accounts managed by the specified key vault. This operation requires the -// storage/list permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// maxresults - maximum number of results to return in a page. If not specified the service will return up to -// 25 results. 
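Reading one managed storage account is a single call; note that the ^[0-9a-zA-Z]+$ pattern on storageAccountName is checked client-side before any request is sent. A sketch continuing the setup above ("mystorageacct" is hypothetical; GetStorageAccounts resumes below):

	sb, err := client.GetStorageAccount(ctx, vaultURL, "mystorageacct")
	if err != nil {
		log.Fatal(err) // a name failing the pattern check errors here, without a round-trip
	}
	fmt.Println(*sb.ID)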
-func (client BaseClient) GetStorageAccounts(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.slr.Response.Response != nil { - sc = result.slr.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: maxresults, - Constraints: []validation.Constraint{{Target: "maxresults", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "maxresults", Name: validation.InclusiveMaximum, Rule: int64(25), Chain: nil}, - {Target: "maxresults", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "GetStorageAccounts", err.Error()) - } - - result.fn = client.getStorageAccountsNextResults - req, err := client.GetStorageAccountsPreparer(ctx, vaultBaseURL, maxresults) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", nil, "Failure preparing request") - return - } - - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.slr.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure sending request") - return - } - - result.slr, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "GetStorageAccounts", resp, "Failure responding to request") - return - } - if result.slr.hasNextLink() && result.slr.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// GetStorageAccountsPreparer prepares the GetStorageAccounts request. -func (client BaseClient) GetStorageAccountsPreparer(ctx context.Context, vaultBaseURL string, maxresults *int32) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if maxresults != nil { - queryParameters["maxresults"] = autorest.Encode("query", *maxresults) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPath("/storage"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetStorageAccountsSender sends the GetStorageAccounts request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) GetStorageAccountsSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetStorageAccountsResponder handles the response to the GetStorageAccounts request. The method always -// closes the http.Response Body. -func (client BaseClient) GetStorageAccountsResponder(resp *http.Response) (result StorageListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// getStorageAccountsNextResults retrieves the next set of results, if any. 
-func (client BaseClient) getStorageAccountsNextResults(ctx context.Context, lastResults StorageListResult) (result StorageListResult, err error) { - req, err := lastResults.storageListResultPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.GetStorageAccountsSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure sending next results request") - } - result, err = client.GetStorageAccountsResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "getStorageAccountsNextResults", resp, "Failure responding to next results request") - } - return -} - -// GetStorageAccountsComplete enumerates all values, automatically crossing page boundaries as required. -func (client BaseClient) GetStorageAccountsComplete(ctx context.Context, vaultBaseURL string, maxresults *int32) (result StorageListResultIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.GetStorageAccounts") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.GetStorageAccounts(ctx, vaultBaseURL, maxresults) - return -} - -// ImportCertificate imports an existing valid certificate, containing a private key, into Azure Key Vault. The -// certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM format the PEM file must -// contain the key as well as x509 certificates. This operation requires the certificates/import permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to import the certificate. 
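As the validation block in GetStorageAccounts shows, maxresults is constrained to the range 1..25 on the client, so out-of-range values fail locally with a validation.NewError rather than triggering a service call. Sketch, continuing the setup above; the ImportCertificate implementation documented above follows:

	tooMany := int32(100)
	if _, err := client.GetStorageAccounts(ctx, vaultURL, &tooMany); err != nil {
		fmt.Println(err) // validation error constructed locally; no request was issued
	}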
-func (client BaseClient) ImportCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: certificateName, - Constraints: []validation.Constraint{{Target: "certificateName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Base64EncodedCertificate", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.CertificatePolicy", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.CertificatePolicy.X509CertificateProperties.ValidityInMonths", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportCertificate", err.Error()) - } - - req, err := client.ImportCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.ImportCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure sending request") - return - } - - result, err = client.ImportCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportCertificate", resp, "Failure responding to request") - return - } - - return -} - -// ImportCertificatePreparer prepares the ImportCertificate request. -func (client BaseClient) ImportCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/import", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportCertificateSender sends the ImportCertificate request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) ImportCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportCertificateResponder handles the response to the ImportCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ImportKey the import key operation may be used to import any key type into an Azure Key Vault. If the named key -// already exists, Azure Key Vault creates a new version of the key. This operation requires the keys/import -// permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - name for the imported key. -// parameters - the parameters to import a key. -func (client BaseClient) ImportKey(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.ImportKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: keyName, - Constraints: []validation.Constraint{{Target: "keyName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}}, - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Key", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "ImportKey", err.Error()) - } - - req, err := client.ImportKeyPreparer(ctx, vaultBaseURL, keyName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", nil, "Failure preparing request") - return - } - - resp, err := client.ImportKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure sending request") - return - } - - result, err = client.ImportKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "ImportKey", resp, "Failure responding to request") - return - } - - return -} - -// ImportKeyPreparer prepares the ImportKey request. 
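Every operation is generated as a wrapper plus a Preparer/Sender/Responder trio, so the pipeline can also be driven manually when the raw *http.Request is needed (for custom instrumentation, say). A sketch using the GetSecret trio from earlier in this file, where passing "" as the version selects the latest one; ImportKeyPreparer itself continues below:

	req, err := client.GetSecretPreparer(ctx, vaultURL, "db-password", "")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.GetSecretSender(req) // retries per client.RetryAttempts/RetryDuration
	if err != nil {
		log.Fatal(err)
	}
	bundle, err := client.GetSecretResponder(resp) // unmarshals the JSON and closes the body
	if err == nil {
		fmt.Println(*bundle.Value)
	}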
-func (client BaseClient) ImportKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, parameters KeyImportParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ImportKeySender sends the ImportKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) ImportKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ImportKeyResponder handles the response to the ImportKey request. The method always -// closes the http.Response Body. -func (client BaseClient) ImportKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// MergeCertificate the MergeCertificate operation performs the merging of a certificate or certificate chain with a -// key pair currently available in the service. This operation requires the certificates/create permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate. -// parameters - the parameters to merge certificate. 
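ImportKey only requires the Key field (validated as non-nil above); the rest is optional. A hedged sketch importing a caller-prepared RSA public key, where the modulus and exponent strings are assumed to be base64url-encoded elsewhere; the MergeCertificate implementation documented above follows:

	n, e := "<modulus-b64url>", "AQAB" // placeholders for caller-prepared values
	params := keyvault.KeyImportParameters{
		Key: &keyvault.JSONWebKey{
			Kty: keyvault.RSA,
			N:   &n,
			E:   &e,
		},
	}
	kb, err := client.ImportKey(ctx, vaultURL, "imported-key", params)
	if err == nil {
		fmt.Println(*kb.Key.Kid) // new or re-versioned key identifier
	}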
-func (client BaseClient) MergeCertificate(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.MergeCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.X509Certificates", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "MergeCertificate", err.Error()) - } - - req, err := client.MergeCertificatePreparer(ctx, vaultBaseURL, certificateName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.MergeCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure sending request") - return - } - - result, err = client.MergeCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "MergeCertificate", resp, "Failure responding to request") - return - } - - return -} - -// MergeCertificatePreparer prepares the MergeCertificate request. -func (client BaseClient) MergeCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, parameters CertificateMergeParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending/merge", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// MergeCertificateSender sends the MergeCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) MergeCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// MergeCertificateResponder handles the response to the MergeCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) MergeCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// PurgeDeletedCertificate the PurgeDeletedCertificate operation performs an irreversible deletion of the specified -// certificate, without possibility for recovery. 
The operation is not available if the recovery level does not specify -// 'Purgeable'. This operation requires the certificate/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate -func (client BaseClient) PurgeDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedCertificate") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedCertificateSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedCertificatePreparer prepares the PurgeDeletedCertificate request. -func (client BaseClient) PurgeDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedCertificateSender sends the PurgeDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedCertificateResponder handles the response to the PurgeDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedCertificateResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedKey the Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the operation -// can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled vault. This operation -// requires the keys/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. 
-// keyName - the name of the key -func (client BaseClient) PurgeDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedKey") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedKeySender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedKeyPreparer prepares the PurgeDeletedKey request. -func (client BaseClient) PurgeDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedKeySender sends the PurgeDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedKeyResponder handles the response to the PurgeDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedKeyResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedSecret the purge deleted secret operation removes the secret permanently, without the possibility of -// recovery. This operation can only be enabled on a soft-delete enabled vault. This operation requires the -// secrets/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. 
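Purging is irreversible and only permitted on soft-delete enabled vaults; per the responder above, 200 and 204 both count as success. Sketch continuing the setup above ("retired-key" is hypothetical; the PurgeDeletedSecret implementation documented above follows):

	resp, err := client.PurgeDeletedKey(ctx, vaultURL, "retired-key")
	if err != nil {
		log.Fatal(err) // e.g. vault without soft-delete, or missing keys/purge permission
	}
	fmt.Println(resp.StatusCode) // typically 204 No Content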
-func (client BaseClient) PurgeDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedSecret") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.PurgeDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedSecretSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedSecretPreparer prepares the PurgeDeletedSecret request. -func (client BaseClient) PurgeDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedSecretSender sends the PurgeDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedSecretResponder handles the response to the PurgeDeletedSecret request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedSecretResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// PurgeDeletedStorageAccount the purge deleted storage account operation removes the secret permanently, without the -// possibility of recovery. This operation can only be performed on a soft-delete enabled vault. This operation -// requires the storage/purge permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. 
-func (client BaseClient) PurgeDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.PurgeDeletedStorageAccount") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "PurgeDeletedStorageAccount", err.Error()) - } - - req, err := client.PurgeDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.PurgeDeletedStorageAccountSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.PurgeDeletedStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "PurgeDeletedStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// PurgeDeletedStorageAccountPreparer prepares the PurgeDeletedStorageAccount request. -func (client BaseClient) PurgeDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// PurgeDeletedStorageAccountSender sends the PurgeDeletedStorageAccount request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) PurgeDeletedStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// PurgeDeletedStorageAccountResponder handles the response to the PurgeDeletedStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) PurgeDeletedStorageAccountResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// RecoverDeletedCertificate the RecoverDeletedCertificate operation performs the reversal of the Delete operation. The -// operation is applicable in vaults enabled for soft-delete, and must be issued during the retention interval -// (available in the deleted certificate's attributes). 
This operation requires the certificates/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the deleted certificate -func (client BaseClient) RecoverDeletedCertificate(ctx context.Context, vaultBaseURL string, certificateName string) (result CertificateBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedCertificate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedCertificatePreparer(ctx, vaultBaseURL, certificateName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedCertificateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedCertificateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedCertificate", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedCertificatePreparer prepares the RecoverDeletedCertificate request. -func (client BaseClient) RecoverDeletedCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedcertificates/{certificate-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedCertificateSender sends the RecoverDeletedCertificate request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedCertificateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedCertificateResponder handles the response to the RecoverDeletedCertificate request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedCertificateResponder(resp *http.Response) (result CertificateBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedKey the Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults. It -// recovers the deleted key back to its latest version under /keys. An attempt to recover an non-deleted key will -// return an error. Consider this the inverse of the delete operation on soft-delete enabled vaults. 
This operation -// requires the keys/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the deleted key. -func (client BaseClient) RecoverDeletedKey(ctx context.Context, vaultBaseURL string, keyName string) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedKeyPreparer(ctx, vaultBaseURL, keyName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedKey", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedKeyPreparer prepares the RecoverDeletedKey request. -func (client BaseClient) RecoverDeletedKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedkeys/{key-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedKeySender sends the RecoverDeletedKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedKeyResponder handles the response to the RecoverDeletedKey request. The method always -// closes the http.Response Body. -func (client BaseClient) RecoverDeletedKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSasDefinition recovers the deleted SAS definition for the specified storage account. This operation -// can only be performed on a soft-delete enabled vault. This operation requires the storage/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. 
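Recovery is the inverse of deletion on soft-delete enabled vaults: the key returns under /keys at its latest version, and recovering a non-deleted key is an error. Sketch continuing the setup above; the RecoverDeletedSasDefinition implementation documented above follows:

	kb, err := client.RecoverDeletedKey(ctx, vaultURL, "retired-key")
	if err == nil {
		fmt.Println(*kb.Key.Kid) // restored under /keys at its latest version
	}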
-func (client BaseClient) RecoverDeletedSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedSasDefinition", err.Error()) - } - - req, err := client.RecoverDeletedSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSasDefinitionPreparer prepares the RecoverDeletedSasDefinition request. -func (client BaseClient) RecoverDeletedSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedstorage/{storage-account-name}/sas/{sas-definition-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSasDefinitionSender sends the RecoverDeletedSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSasDefinitionResponder handles the response to the RecoverDeletedSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) RecoverDeletedSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// RecoverDeletedSecret recovers the deleted secret in the specified vault. This operation can only be performed on a -// soft-delete enabled vault. This operation requires the secrets/recover permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the deleted secret. -func (client BaseClient) RecoverDeletedSecret(ctx context.Context, vaultBaseURL string, secretName string) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.RecoverDeletedSecretPreparer(ctx, vaultBaseURL, secretName) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", nil, "Failure preparing request") - return - } - - resp, err := client.RecoverDeletedSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure sending request") - return - } - - result, err = client.RecoverDeletedSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedSecret", resp, "Failure responding to request") - return - } - - return -} - -// RecoverDeletedSecretPreparer prepares the RecoverDeletedSecret request. -func (client BaseClient) RecoverDeletedSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/deletedsecrets/{secret-name}/recover", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// RecoverDeletedSecretSender sends the RecoverDeletedSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) RecoverDeletedSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always -// closes the http.Response Body. 
-// RecoverDeletedSecretResponder handles the response to the RecoverDeletedSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RecoverDeletedSecretResponder(resp *http.Response) (result SecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RecoverDeletedStorageAccount recovers the deleted storage account in the specified vault. This operation can only be
-// performed on a soft-delete enabled vault. This operation requires the storage/recover permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-func (client BaseClient) RecoverDeletedStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string) (result StorageBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RecoverDeletedStorageAccount")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RecoverDeletedStorageAccount", err.Error())
-	}
-
-	req, err := client.RecoverDeletedStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RecoverDeletedStorageAccountSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RecoverDeletedStorageAccountResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RecoverDeletedStorageAccount", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RecoverDeletedStorageAccountPreparer prepares the RecoverDeletedStorageAccount request.
-func (client BaseClient) RecoverDeletedStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/deletedstorage/{storage-account-name}/recover", pathParameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RecoverDeletedStorageAccountSender sends the RecoverDeletedStorageAccount request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RecoverDeletedStorageAccountSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// RecoverDeletedStorageAccountResponder handles the response to the RecoverDeletedStorageAccount request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RecoverDeletedStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RegenerateStorageAccountKey regenerates the specified key value for the given storage account. This operation
-// requires the storage/regeneratekey permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// parameters - the parameters to regenerate storage account key.
-func (client BaseClient) RegenerateStorageAccountKey(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (result StorageBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RegenerateStorageAccountKey")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RegenerateStorageAccountKey", err.Error())
-	}
-
-	req, err := client.RegenerateStorageAccountKeyPreparer(ctx, vaultBaseURL, storageAccountName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RegenerateStorageAccountKeySender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RegenerateStorageAccountKeyResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RegenerateStorageAccountKey", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
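A rough usage sketch for the key-regeneration call (illustrative; client/ctx as in the first sketch, and note the parameter type name really is spelled "RegenerteKey" in this generated SDK, as the diff above shows):

	func regenerateKey(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
		bundle, err := client.RegenerateStorageAccountKey(ctx, vaultURL, "mystorageacct",
			keyvault.StorageAccountRegenerteKeyParameters{KeyName: to.StringPtr("key1")})
		if err != nil {
			return err
		}
		fmt.Println(*bundle.ID)
		return nil
	}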
-// RegenerateStorageAccountKeyPreparer prepares the RegenerateStorageAccountKey request.
-func (client BaseClient) RegenerateStorageAccountKeyPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountRegenerteKeyParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/regeneratekey", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RegenerateStorageAccountKeySender sends the RegenerateStorageAccountKey request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RegenerateStorageAccountKeySender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// RegenerateStorageAccountKeyResponder handles the response to the RegenerateStorageAccountKey request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RegenerateStorageAccountKeyResponder(resp *http.Response) (result StorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RestoreCertificate restores a backed up certificate, and all its versions, to a vault. This operation requires the
-// certificates/restore permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// parameters - the parameters to restore the certificate.
-func (client BaseClient) RestoreCertificate(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (result CertificateBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreCertificate")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.CertificateBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RestoreCertificate", err.Error())
-	}
-
-	req, err := client.RestoreCertificatePreparer(ctx, vaultBaseURL, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RestoreCertificateSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RestoreCertificateResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreCertificate", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RestoreCertificatePreparer prepares the RestoreCertificate request.
-func (client BaseClient) RestoreCertificatePreparer(ctx context.Context, vaultBaseURL string, parameters CertificateRestoreParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/certificates/restore"),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestoreCertificateSender sends the RestoreCertificate request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RestoreCertificateSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// RestoreCertificateResponder handles the response to the RestoreCertificate request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RestoreCertificateResponder(resp *http.Response) (result CertificateBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RestoreKey imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier,
-// attributes and access control policies. The RESTORE operation may be used to import a previously backed up key.
-// Individual versions of a key cannot be restored. The key is restored in its entirety with the same key name as it
-// had when it was backed up. If the key name is not available in the target Key Vault, the RESTORE operation will be
-// rejected. While the key name is retained during restore, the final key identifier will change if the key is restored
-// to a different vault. Restore will restore all versions and preserve version identifiers. The RESTORE operation is
-// subject to security constraints: The target Key Vault must be owned by the same Microsoft Azure Subscription as the
-// source Key Vault The user must have RESTORE permission in the target Key Vault. This operation requires the
-// keys/restore permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// parameters - the parameters to restore the key.
-func (client BaseClient) RestoreKey(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (result KeyBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreKey")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.KeyBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RestoreKey", err.Error())
-	}
-
-	req, err := client.RestoreKeyPreparer(ctx, vaultBaseURL, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RestoreKeySender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RestoreKeyResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreKey", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RestoreKeyPreparer prepares the RestoreKey request.
-func (client BaseClient) RestoreKeyPreparer(ctx context.Context, vaultBaseURL string, parameters KeyRestoreParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/keys/restore"),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestoreKeySender sends the RestoreKey request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RestoreKeySender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
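The backup/restore pairing described above is easiest to see end to end; a hedged sketch (client/ctx as in the first sketch, vault URLs and key name are placeholders):

	func moveKey(ctx context.Context, client keyvault.BaseClient, srcVault, dstVault string) error {
		// BackupKey returns an opaque, protected blob in BackupKeyResult.Value.
		backup, err := client.BackupKey(ctx, srcVault, "my-key")
		if err != nil {
			return err
		}
		// KeyBundleBackup is the required field validated in RestoreKey above.
		restored, err := client.RestoreKey(ctx, dstVault, keyvault.KeyRestoreParameters{
			KeyBundleBackup: backup.Value,
		})
		if err != nil {
			return err
		}
		fmt.Println(*restored.Key.Kid) // key identifier changes when restoring to a different vault
		return nil
	}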
-// RestoreKeyResponder handles the response to the RestoreKey request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RestoreKeyResponder(resp *http.Response) (result KeyBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RestoreSecret restores a backed up secret, and all its versions, to a vault. This operation requires the
-// secrets/restore permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// parameters - the parameters to restore the secret.
-func (client BaseClient) RestoreSecret(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (result SecretBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreSecret")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.SecretBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RestoreSecret", err.Error())
-	}
-
-	req, err := client.RestoreSecretPreparer(ctx, vaultBaseURL, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RestoreSecretSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RestoreSecretResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreSecret", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RestoreSecretPreparer prepares the RestoreSecret request.
-func (client BaseClient) RestoreSecretPreparer(ctx context.Context, vaultBaseURL string, parameters SecretRestoreParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/secrets/restore"),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestoreSecretSender sends the RestoreSecret request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RestoreSecretSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
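Secrets follow the same backup/restore shape; a minimal sketch under the same assumptions:

	func moveSecret(ctx context.Context, client keyvault.BaseClient, srcVault, dstVault string) error {
		backup, err := client.BackupSecret(ctx, srcVault, "my-secret") // BackupSecretResult.Value is the blob
		if err != nil {
			return err
		}
		restored, err := client.RestoreSecret(ctx, dstVault, keyvault.SecretRestoreParameters{
			SecretBundleBackup: backup.Value, // required field, per the validation above
		})
		if err != nil {
			return err
		}
		fmt.Println(*restored.ID)
		return nil
	}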
-// RestoreSecretResponder handles the response to the RestoreSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RestoreSecretResponder(resp *http.Response) (result SecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// RestoreStorageAccount restores a backed up storage account to a vault. This operation requires the storage/restore
-// permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// parameters - the parameters to restore the storage account.
-func (client BaseClient) RestoreStorageAccount(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (result StorageBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.RestoreStorageAccount")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.StorageBundleBackup", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "RestoreStorageAccount", err.Error())
-	}
-
-	req, err := client.RestoreStorageAccountPreparer(ctx, vaultBaseURL, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.RestoreStorageAccountSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.RestoreStorageAccountResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "RestoreStorageAccount", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// RestoreStorageAccountPreparer prepares the RestoreStorageAccount request.
-func (client BaseClient) RestoreStorageAccountPreparer(ctx context.Context, vaultBaseURL string, parameters StorageRestoreParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/storage/restore"),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// RestoreStorageAccountSender sends the RestoreStorageAccount request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) RestoreStorageAccountSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// RestoreStorageAccountResponder handles the response to the RestoreStorageAccount request. The method always
-// closes the http.Response Body.
-func (client BaseClient) RestoreStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// SetCertificateContacts sets the certificate contacts for the specified key vault. This operation requires the
-// certificates/managecontacts permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// contacts - the contacts for the key vault certificate.
-func (client BaseClient) SetCertificateContacts(ctx context.Context, vaultBaseURL string, contacts Contacts) (result Contacts, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateContacts")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.SetCertificateContactsPreparer(ctx, vaultBaseURL, contacts)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SetCertificateContactsSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SetCertificateContactsResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateContacts", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SetCertificateContactsPreparer prepares the SetCertificateContacts request.
-func (client BaseClient) SetCertificateContactsPreparer(ctx context.Context, vaultBaseURL string, contacts Contacts) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	contacts.ID = nil
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPath("/certificates/contacts"),
-		autorest.WithJSON(contacts),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SetCertificateContactsSender sends the SetCertificateContacts request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SetCertificateContactsSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
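A hedged sketch of setting vault-wide certificate contacts (client/ctx as in the first sketch; the address is a placeholder):

	func setContacts(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
		contacts, err := client.SetCertificateContacts(ctx, vaultURL, keyvault.Contacts{
			ContactList: &[]keyvault.Contact{{EmailAddress: to.StringPtr("pki-team@example.com")}},
		})
		if err != nil {
			return err
		}
		fmt.Println(len(*contacts.ContactList))
		return nil
	}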
-// SetCertificateContactsResponder handles the response to the SetCertificateContacts request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SetCertificateContactsResponder(resp *http.Response) (result Contacts, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// SetCertificateIssuer the SetCertificateIssuer operation adds or updates the specified certificate issuer. This
-// operation requires the certificates/setissuers permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// issuerName - the name of the issuer.
-// parameter - certificate issuer set parameter.
-func (client BaseClient) SetCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (result IssuerBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetCertificateIssuer")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameter,
-			Constraints: []validation.Constraint{{Target: "parameter.Provider", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "SetCertificateIssuer", err.Error())
-	}
-
-	req, err := client.SetCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SetCertificateIssuerSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SetCertificateIssuerResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetCertificateIssuer", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SetCertificateIssuerPreparer prepares the SetCertificateIssuer request.
-func (client BaseClient) SetCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerSetParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"issuer-name": autorest.Encode("path", issuerName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters),
-		autorest.WithJSON(parameter),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
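A minimal issuer-registration sketch (client/ctx as in the first sketch; Provider is the only field the validation above requires, and "Self" denotes self-signed issuance):

	func setIssuer(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
		issuer, err := client.SetCertificateIssuer(ctx, vaultURL, "my-issuer",
			keyvault.CertificateIssuerSetParameters{Provider: to.StringPtr("Self")})
		if err != nil {
			return err
		}
		fmt.Println(*issuer.ID)
		return nil
	}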
-// SetCertificateIssuerSender sends the SetCertificateIssuer request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SetCertificateIssuerSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// SetCertificateIssuerResponder handles the response to the SetCertificateIssuer request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SetCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// SetSasDefinition creates or updates a new SAS definition for the specified storage account. This operation requires
-// the storage/setsas permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// sasDefinitionName - the name of the SAS definition.
-// parameters - the parameters to create a SAS definition.
-func (client BaseClient) SetSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (result SasDefinitionBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSasDefinition")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: sasDefinitionName,
-			Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.TemplateURI", Name: validation.Null, Rule: true, Chain: nil},
-				{Target: "parameters.ValidityPeriod", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "SetSasDefinition", err.Error())
-	}
-
-	req, err := client.SetSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SetSasDefinitionSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SetSasDefinitionResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSasDefinition", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SetSasDefinitionPreparer prepares the SetSasDefinition request.
-func (client BaseClient) SetSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionCreateParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"sas-definition-name":  autorest.Encode("path", sasDefinitionName),
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SetSasDefinitionSender sends the SetSasDefinition request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SetSasDefinitionSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// SetSasDefinitionResponder handles the response to the SetSasDefinition request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SetSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// SetSecret the SET operation adds a secret to the Azure Key Vault. If the named secret already exists, Azure Key
-// Vault creates a new version of that secret. This operation requires the secrets/set permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// secretName - the name of the secret.
-// parameters - the parameters for setting the secret.
-func (client BaseClient) SetSecret(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (result SecretBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetSecret")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: secretName,
-			Constraints: []validation.Constraint{{Target: "secretName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z-]+$`, Chain: nil}}},
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "SetSecret", err.Error())
-	}
-
-	req, err := client.SetSecretPreparer(ctx, vaultBaseURL, secretName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SetSecretSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SetSecretResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetSecret", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SetSecretPreparer prepares the SetSecret request.
-func (client BaseClient) SetSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, parameters SecretSetParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"secret-name": autorest.Encode("path", secretName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/secrets/{secret-name}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SetSecretSender sends the SetSecret request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SetSecretSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// SetSecretResponder handles the response to the SetSecret request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SetSecretResponder(resp *http.Response) (result SecretBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
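The SET semantics above (create or add a new version) are visible in a short sketch (client/ctx as in the first sketch; name and value are placeholders):

	func setSecret(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
		// Creates "db-password", or adds a new version if it already exists.
		sb, err := client.SetSecret(ctx, vaultURL, "db-password", keyvault.SecretSetParameters{
			Value:       to.StringPtr("s3cr3t"),        // required, per the validation above
			ContentType: to.StringPtr("text/plain"),
		})
		if err != nil {
			return err
		}
		fmt.Println(*sb.ID) // versioned secret identifier
		return nil
	}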
-// SetStorageAccount creates or updates a new storage account. This operation requires the storage/set permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// storageAccountName - the name of the storage account.
-// parameters - the parameters to create a storage account.
-func (client BaseClient) SetStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (result StorageBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.SetStorageAccount")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: storageAccountName,
-			Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}},
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.ResourceID", Name: validation.Null, Rule: true, Chain: nil},
-				{Target: "parameters.ActiveKeyName", Name: validation.Null, Rule: true, Chain: nil},
-				{Target: "parameters.AutoRegenerateKey", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "SetStorageAccount", err.Error())
-	}
-
-	req, err := client.SetStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SetStorageAccountSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SetStorageAccountResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "SetStorageAccount", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SetStorageAccountPreparer prepares the SetStorageAccount request.
-func (client BaseClient) SetStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountCreateParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"storage-account-name": autorest.Encode("path", storageAccountName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPut(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SetStorageAccountSender sends the SetStorageAccount request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SetStorageAccountSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// SetStorageAccountResponder handles the response to the SetStorageAccount request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SetStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// Sign the SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault since this
-// operation uses the private portion of the key. This operation requires the keys/sign permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// keyName - the name of the key.
-// keyVersion - the version of the key.
-// parameters - the parameters for the signing operation.
-func (client BaseClient) Sign(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (result KeyOperationResult, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Sign")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "Sign", err.Error())
-	}
-
-	req, err := client.SignPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.SignSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.SignResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Sign", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// SignPreparer prepares the Sign request.
-func (client BaseClient) SignPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeySignParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"key-name":    autorest.Encode("path", keyName),
-		"key-version": autorest.Encode("path", keyVersion),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/keys/{key-name}/{key-version}/sign", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// SignSender sends the Sign request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) SignSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// SignResponder handles the response to the Sign request. The method always
-// closes the http.Response Body.
-func (client BaseClient) SignResponder(resp *http.Response) (result KeyOperationResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// UnwrapKey the UNWRAP operation supports decryption of a symmetric key using the target key encryption key. This
-// operation is the reverse of the WRAP operation. The UNWRAP operation applies to asymmetric and symmetric keys stored
-// in Azure Key Vault since it uses the private portion of the key. This operation requires the keys/unwrapKey
-// permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// keyName - the name of the key.
-// keyVersion - the version of the key.
-// parameters - the parameters for the key operation.
-func (client BaseClient) UnwrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UnwrapKey")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	if err := validation.Validate([]validation.Validation{
-		{TargetValue: parameters,
-			Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
-		return result, validation.NewError("keyvault.BaseClient", "UnwrapKey", err.Error())
-	}
-
-	req, err := client.UnwrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.UnwrapKeySender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.UnwrapKeyResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UnwrapKey", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// UnwrapKeyPreparer prepares the UnwrapKey request.
-func (client BaseClient) UnwrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"key-name":    autorest.Encode("path", keyName),
-		"key-version": autorest.Encode("path", keyVersion),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPost(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/keys/{key-name}/{key-version}/unwrapkey", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
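A hedged signing sketch (client/ctx as in the first sketch; the digest must be the base64url-encoded hash matching the algorithm, SHA-256 for RS256, and passing an empty key version is assumed to resolve to the latest version):

	func signDigest(ctx context.Context, client keyvault.BaseClient, vaultURL, digestB64URL string) error {
		res, err := client.Sign(ctx, vaultURL, "my-signing-key", "", keyvault.KeySignParameters{
			Algorithm: keyvault.RS256,                // JSONWebKeySignatureAlgorithm constant
			Value:     to.StringPtr(digestB64URL),    // required, per the validation above
		})
		if err != nil {
			return err
		}
		fmt.Println(*res.Result) // base64url-encoded signature
		return nil
	}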
-// UnwrapKeySender sends the UnwrapKey request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) UnwrapKeySender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// UnwrapKeyResponder handles the response to the UnwrapKey request. The method always
-// closes the http.Response Body.
-func (client BaseClient) UnwrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// UpdateCertificate the UpdateCertificate operation applies the specified update on the given certificate; the only
-// elements updated are the certificate's attributes. This operation requires the certificates/update permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// certificateName - the name of the certificate in the given key vault.
-// certificateVersion - the version of the certificate.
-// parameters - the parameters for certificate update.
-func (client BaseClient) UpdateCertificate(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (result CertificateBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificate")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.UpdateCertificatePreparer(ctx, vaultBaseURL, certificateName, certificateVersion, parameters)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.UpdateCertificateSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.UpdateCertificateResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificate", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
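An attributes-only update sketch for the operation above (client/ctx as in the first sketch; an empty certificate version is assumed to address the latest version):

	func disableCert(ctx context.Context, client keyvault.BaseClient, vaultURL string) error {
		cb, err := client.UpdateCertificate(ctx, vaultURL, "my-cert", "",
			keyvault.CertificateUpdateParameters{
				CertificateAttributes: &keyvault.CertificateAttributes{Enabled: to.BoolPtr(false)},
			})
		if err != nil {
			return err
		}
		fmt.Println(*cb.ID)
		return nil
	}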
-// UpdateCertificatePreparer prepares the UpdateCertificate request.
-func (client BaseClient) UpdateCertificatePreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateVersion string, parameters CertificateUpdateParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"certificate-name":    autorest.Encode("path", certificateName),
-		"certificate-version": autorest.Encode("path", certificateVersion),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPatch(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/certificates/{certificate-name}/{certificate-version}", pathParameters),
-		autorest.WithJSON(parameters),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateCertificateSender sends the UpdateCertificate request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) UpdateCertificateSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// UpdateCertificateResponder handles the response to the UpdateCertificate request. The method always
-// closes the http.Response Body.
-func (client BaseClient) UpdateCertificateResponder(resp *http.Response) (result CertificateBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// UpdateCertificateIssuer the UpdateCertificateIssuer operation performs an update on the specified certificate issuer
-// entity. This operation requires the certificates/setissuers permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// issuerName - the name of the issuer.
-// parameter - certificate issuer update parameter.
-func (client BaseClient) UpdateCertificateIssuer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (result IssuerBundle, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateIssuer")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.UpdateCertificateIssuerPreparer(ctx, vaultBaseURL, issuerName, parameter)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.UpdateCertificateIssuerSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.UpdateCertificateIssuerResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateIssuer", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// UpdateCertificateIssuerPreparer prepares the UpdateCertificateIssuer request.
-func (client BaseClient) UpdateCertificateIssuerPreparer(ctx context.Context, vaultBaseURL string, issuerName string, parameter CertificateIssuerUpdateParameters) (*http.Request, error) {
-	urlParameters := map[string]interface{}{
-		"vaultBaseUrl": vaultBaseURL,
-	}
-
-	pathParameters := map[string]interface{}{
-		"issuer-name": autorest.Encode("path", issuerName),
-	}
-
-	const APIVersion = "7.0"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsContentType("application/json; charset=utf-8"),
-		autorest.AsPatch(),
-		autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters),
-		autorest.WithPathParameters("/certificates/issuers/{issuer-name}", pathParameters),
-		autorest.WithJSON(parameter),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// UpdateCertificateIssuerSender sends the UpdateCertificateIssuer request. The method will close the
-// http.Response Body if it receives an error.
-func (client BaseClient) UpdateCertificateIssuerSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// UpdateCertificateIssuerResponder handles the response to the UpdateCertificateIssuer request. The method always
-// closes the http.Response Body.
-func (client BaseClient) UpdateCertificateIssuerResponder(resp *http.Response) (result IssuerBundle, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
-
-// UpdateCertificateOperation updates a certificate creation operation that is already in progress. This operation
-// requires the certificates/update permission.
-// Parameters:
-// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net.
-// certificateName - the name of the certificate.
-// certificateOperation - the certificate operation response.
-func (client BaseClient) UpdateCertificateOperation(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (result CertificateOperation, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificateOperation")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.UpdateCertificateOperationPreparer(ctx, vaultBaseURL, certificateName, certificateOperation)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.UpdateCertificateOperationSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.UpdateCertificateOperationResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificateOperation", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// UpdateCertificateOperationPreparer prepares the UpdateCertificateOperation request.
-func (client BaseClient) UpdateCertificateOperationPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificateOperation CertificateOperationUpdateParameter) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/pending", pathParameters), - autorest.WithJSON(certificateOperation), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificateOperationSender sends the UpdateCertificateOperation request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificateOperationSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificateOperationResponder handles the response to the UpdateCertificateOperation request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificateOperationResponder(resp *http.Response) (result CertificateOperation, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateCertificatePolicy set specified members in the certificate policy. Leave others as null. This operation -// requires the certificates/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// certificateName - the name of the certificate in the given vault. -// certificatePolicy - the policy for the certificate. -func (client BaseClient) UpdateCertificatePolicy(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (result CertificatePolicy, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateCertificatePolicy") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateCertificatePolicyPreparer(ctx, vaultBaseURL, certificateName, certificatePolicy) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateCertificatePolicySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure sending request") - return - } - - result, err = client.UpdateCertificatePolicyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateCertificatePolicy", resp, "Failure responding to request") - return - } - - return -} - -// UpdateCertificatePolicyPreparer prepares the UpdateCertificatePolicy request. 
-func (client BaseClient) UpdateCertificatePolicyPreparer(ctx context.Context, vaultBaseURL string, certificateName string, certificatePolicy CertificatePolicy) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "certificate-name": autorest.Encode("path", certificateName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - certificatePolicy.ID = nil - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/certificates/{certificate-name}/policy", pathParameters), - autorest.WithJSON(certificatePolicy), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateCertificatePolicySender sends the UpdateCertificatePolicy request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateCertificatePolicySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateCertificatePolicyResponder handles the response to the UpdateCertificatePolicy request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateCertificatePolicyResponder(resp *http.Response) (result CertificatePolicy, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateKey in order to perform this operation, the key must already exist in the Key Vault. Note: The cryptographic -// material of a key itself cannot be changed. This operation requires the keys/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of key to update. -// keyVersion - the version of the key to update. -// parameters - the parameters of the key to update. -func (client BaseClient) UpdateKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (result KeyBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure sending request") - return - } - - result, err = client.UpdateKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateKey", resp, "Failure responding to request") - return - } - - return -} - -// UpdateKeyPreparer prepares the UpdateKey request. 
-func (client BaseClient) UpdateKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateKeySender sends the UpdateKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateKeyResponder handles the response to the UpdateKey request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateKeyResponder(resp *http.Response) (result KeyBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSasDefinition updates the specified attributes associated with the given SAS definition. This operation -// requires the storage/setsas permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// sasDefinitionName - the name of the SAS definition. -// parameters - the parameters to update a SAS definition. 
-func (client BaseClient) UpdateSasDefinition(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (result SasDefinitionBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSasDefinition") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}, - {TargetValue: sasDefinitionName, - Constraints: []validation.Constraint{{Target: "sasDefinitionName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateSasDefinition", err.Error()) - } - - req, err := client.UpdateSasDefinitionPreparer(ctx, vaultBaseURL, storageAccountName, sasDefinitionName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSasDefinitionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure sending request") - return - } - - result, err = client.UpdateSasDefinitionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSasDefinition", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSasDefinitionPreparer prepares the UpdateSasDefinition request. -func (client BaseClient) UpdateSasDefinitionPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, sasDefinitionName string, parameters SasDefinitionUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "sas-definition-name": autorest.Encode("path", sasDefinitionName), - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}/sas/{sas-definition-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSasDefinitionSender sends the UpdateSasDefinition request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSasDefinitionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSasDefinitionResponder handles the response to the UpdateSasDefinition request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateSasDefinitionResponder(resp *http.Response) (result SasDefinitionBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateSecret the UPDATE operation changes specified attributes of an existing stored secret. Attributes that are not -// specified in the request are left unchanged. The value of a secret itself cannot be changed. This operation requires -// the secrets/set permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// secretName - the name of the secret. -// secretVersion - the version of the secret. -// parameters - the parameters for update secret operation. -func (client BaseClient) UpdateSecret(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (result SecretBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateSecret") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdateSecretPreparer(ctx, vaultBaseURL, secretName, secretVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSecretSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure sending request") - return - } - - result, err = client.UpdateSecretResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateSecret", resp, "Failure responding to request") - return - } - - return -} - -// UpdateSecretPreparer prepares the UpdateSecret request. -func (client BaseClient) UpdateSecretPreparer(ctx context.Context, vaultBaseURL string, secretName string, secretVersion string, parameters SecretUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "secret-name": autorest.Encode("path", secretName), - "secret-version": autorest.Encode("path", secretVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/secrets/{secret-name}/{secret-version}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSecretSender sends the UpdateSecret request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) UpdateSecretSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateSecretResponder handles the response to the UpdateSecret request. The method always -// closes the http.Response Body. 
-func (client BaseClient) UpdateSecretResponder(resp *http.Response) (result SecretBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// UpdateStorageAccount updates the specified attributes associated with the given storage account. This operation -// requires the storage/set/update permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// storageAccountName - the name of the storage account. -// parameters - the parameters to update a storage account. -func (client BaseClient) UpdateStorageAccount(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (result StorageBundle, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.UpdateStorageAccount") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: storageAccountName, - Constraints: []validation.Constraint{{Target: "storageAccountName", Name: validation.Pattern, Rule: `^[0-9a-zA-Z]+$`, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "UpdateStorageAccount", err.Error()) - } - - req, err := client.UpdateStorageAccountPreparer(ctx, vaultBaseURL, storageAccountName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateStorageAccountSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure sending request") - return - } - - result, err = client.UpdateStorageAccountResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "UpdateStorageAccount", resp, "Failure responding to request") - return - } - - return -} - -// UpdateStorageAccountPreparer prepares the UpdateStorageAccount request. -func (client BaseClient) UpdateStorageAccountPreparer(ctx context.Context, vaultBaseURL string, storageAccountName string, parameters StorageAccountUpdateParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "storage-account-name": autorest.Encode("path", storageAccountName), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/storage/{storage-account-name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateStorageAccountSender sends the UpdateStorageAccount request. The method will close the -// http.Response Body if it receives an error. 
-func (client BaseClient) UpdateStorageAccountSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// UpdateStorageAccountResponder handles the response to the UpdateStorageAccount request. The method always -// closes the http.Response Body. -func (client BaseClient) UpdateStorageAccountResponder(resp *http.Response) (result StorageBundle, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Verify the VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not strictly -// necessary for asymmetric keys stored in Azure Key Vault since signature verification can be performed using the -// public portion of the key but this operation is supported as a convenience for callers that only have a -// key-reference and not the public portion of the key. This operation requires the keys/verify permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for verify operations. -func (client BaseClient) Verify(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (result KeyVerifyResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.Verify") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Digest", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.Signature", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "Verify", err.Error()) - } - - req, err := client.VerifyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", nil, "Failure preparing request") - return - } - - resp, err := client.VerifySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure sending request") - return - } - - result, err = client.VerifyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "Verify", resp, "Failure responding to request") - return - } - - return -} - -// VerifyPreparer prepares the Verify request. 
-func (client BaseClient) VerifyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyVerifyParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/verify", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// VerifySender sends the Verify request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) VerifySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// VerifyResponder handles the response to the Verify request. The method always -// closes the http.Response Body. -func (client BaseClient) VerifyResponder(resp *http.Response) (result KeyVerifyResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// WrapKey the WRAP operation supports encryption of a symmetric key using a key encryption key that has previously -// been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for symmetric keys stored in Azure -// Key Vault since protection with an asymmetric key can be performed using the public portion of the key. This -// operation is supported for asymmetric keys as a convenience for callers that have a key-reference but do not have -// access to the public key material. This operation requires the keys/wrapKey permission. -// Parameters: -// vaultBaseURL - the vault name, for example https://myvault.vault.azure.net. -// keyName - the name of the key. -// keyVersion - the version of the key. -// parameters - the parameters for wrap operation. 
-func (client BaseClient) WrapKey(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (result KeyOperationResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/BaseClient.WrapKey") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.Value", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("keyvault.BaseClient", "WrapKey", err.Error()) - } - - req, err := client.WrapKeyPreparer(ctx, vaultBaseURL, keyName, keyVersion, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", nil, "Failure preparing request") - return - } - - resp, err := client.WrapKeySender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure sending request") - return - } - - result, err = client.WrapKeyResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "keyvault.BaseClient", "WrapKey", resp, "Failure responding to request") - return - } - - return -} - -// WrapKeyPreparer prepares the WrapKey request. -func (client BaseClient) WrapKeyPreparer(ctx context.Context, vaultBaseURL string, keyName string, keyVersion string, parameters KeyOperationsParameters) (*http.Request, error) { - urlParameters := map[string]interface{}{ - "vaultBaseUrl": vaultBaseURL, - } - - pathParameters := map[string]interface{}{ - "key-name": autorest.Encode("path", keyName), - "key-version": autorest.Encode("path", keyVersion), - } - - const APIVersion = "7.0" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithCustomBaseURL("{vaultBaseUrl}", urlParameters), - autorest.WithPathParameters("/keys/{key-name}/{key-version}/wrapkey", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// WrapKeySender sends the WrapKey request. The method will close the -// http.Response Body if it receives an error. -func (client BaseClient) WrapKeySender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// WrapKeyResponder handles the response to the WrapKey request. The method always -// closes the http.Response Body. 
-func (client BaseClient) WrapKeyResponder(resp *http.Response) (result KeyOperationResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/dataplane_meta.json deleted file mode 100644 index 65071c997f5..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/dataplane_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "tag": "package-7.0", - "use": "@microsoft.azure/autorest.go@2.1.183", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-7.0 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go deleted file mode 100644 index 7733a009aff..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/enums.go +++ /dev/null @@ -1,229 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// ActionType enumerates the values for action type. -type ActionType string - -const ( - // AutoRenew ... - AutoRenew ActionType = "AutoRenew" - // EmailContacts ... - EmailContacts ActionType = "EmailContacts" -) - -// PossibleActionTypeValues returns an array of possible values for the ActionType const type. -func PossibleActionTypeValues() []ActionType { - return []ActionType{AutoRenew, EmailContacts} -} - -// DeletionRecoveryLevel enumerates the values for deletion recovery level. -type DeletionRecoveryLevel string - -const ( - // CustomizedRecoverable Denotes a vault state in which deletion is recoverable without the possibility for - // immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level - // guarantees the recoverability of the deleted entity during the retention interval and while the - // subscription is still available. - CustomizedRecoverable DeletionRecoveryLevel = "CustomizedRecoverable" - // CustomizedRecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is - // recoverable, immediate and permanent deletion (i.e. purge) is not permitted, and in which the - // subscription itself cannot be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. 
This level - // guarantees the recoverability of the deleted entity during the retention interval, and also reflects the - // fact that the subscription itself cannot be cancelled. - CustomizedRecoverableProtectedSubscription DeletionRecoveryLevel = "CustomizedRecoverable+ProtectedSubscription" - // CustomizedRecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also - // permits immediate and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This - // level guarantees the recoverability of the deleted entity during the retention interval, unless a Purge - // operation is requested, or the subscription is cancelled. - CustomizedRecoverablePurgeable DeletionRecoveryLevel = "CustomizedRecoverable+Purgeable" - // Purgeable Denotes a vault state in which deletion is an irreversible operation, without the possibility - // for recovery. This level corresponds to no protection being available against a Delete operation; the - // data is irretrievably lost upon accepting a Delete operation at the entity level or higher (vault, - // resource group, subscription etc.) - Purgeable DeletionRecoveryLevel = "Purgeable" - // Recoverable Denotes a vault state in which deletion is recoverable without the possibility for immediate - // and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity - // during the retention interval (90 days) and while the subscription is still available. System will - // permanently delete it after 90 days, if not recovered - Recoverable DeletionRecoveryLevel = "Recoverable" - // RecoverableProtectedSubscription Denotes a vault and subscription state in which deletion is recoverable - // within retention interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and - // in which the subscription itself cannot be permanently canceled. System will permanently delete it after - // 90 days, if not recovered - RecoverableProtectedSubscription DeletionRecoveryLevel = "Recoverable+ProtectedSubscription" - // RecoverablePurgeable Denotes a vault state in which deletion is recoverable, and which also permits - // immediate and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted - // entity during the retention interval (90 days), unless a Purge operation is requested, or the - // subscription is cancelled. System will permanently delete it after 90 days, if not recovered - RecoverablePurgeable DeletionRecoveryLevel = "Recoverable+Purgeable" -) - -// PossibleDeletionRecoveryLevelValues returns an array of possible values for the DeletionRecoveryLevel const type. -func PossibleDeletionRecoveryLevelValues() []DeletionRecoveryLevel { - return []DeletionRecoveryLevel{CustomizedRecoverable, CustomizedRecoverableProtectedSubscription, CustomizedRecoverablePurgeable, Purgeable, Recoverable, RecoverableProtectedSubscription, RecoverablePurgeable} -} - -// JSONWebKeyCurveName enumerates the values for json web key curve name. -type JSONWebKeyCurveName string - -const ( - // P256 The NIST P-256 elliptic curve, AKA SECG curve SECP256R1. - P256 JSONWebKeyCurveName = "P-256" - // P256K The SECG SECP256K1 elliptic curve. - P256K JSONWebKeyCurveName = "P-256K" - // P384 The NIST P-384 elliptic curve, AKA SECG curve SECP384R1. - P384 JSONWebKeyCurveName = "P-384" - // P521 The NIST P-521 elliptic curve, AKA SECG curve SECP521R1.
- P521 JSONWebKeyCurveName = "P-521" -) - -// PossibleJSONWebKeyCurveNameValues returns an array of possible values for the JSONWebKeyCurveName const type. -func PossibleJSONWebKeyCurveNameValues() []JSONWebKeyCurveName { - return []JSONWebKeyCurveName{P256, P256K, P384, P521} -} - -// JSONWebKeyEncryptionAlgorithm enumerates the values for json web key encryption algorithm. -type JSONWebKeyEncryptionAlgorithm string - -const ( - // RSA15 ... - RSA15 JSONWebKeyEncryptionAlgorithm = "RSA1_5" - // RSAOAEP ... - RSAOAEP JSONWebKeyEncryptionAlgorithm = "RSA-OAEP" - // RSAOAEP256 ... - RSAOAEP256 JSONWebKeyEncryptionAlgorithm = "RSA-OAEP-256" -) - -// PossibleJSONWebKeyEncryptionAlgorithmValues returns an array of possible values for the JSONWebKeyEncryptionAlgorithm const type. -func PossibleJSONWebKeyEncryptionAlgorithmValues() []JSONWebKeyEncryptionAlgorithm { - return []JSONWebKeyEncryptionAlgorithm{RSA15, RSAOAEP, RSAOAEP256} -} - -// JSONWebKeyOperation enumerates the values for json web key operation. -type JSONWebKeyOperation string - -const ( - // Decrypt ... - Decrypt JSONWebKeyOperation = "decrypt" - // Encrypt ... - Encrypt JSONWebKeyOperation = "encrypt" - // Sign ... - Sign JSONWebKeyOperation = "sign" - // UnwrapKey ... - UnwrapKey JSONWebKeyOperation = "unwrapKey" - // Verify ... - Verify JSONWebKeyOperation = "verify" - // WrapKey ... - WrapKey JSONWebKeyOperation = "wrapKey" -) - -// PossibleJSONWebKeyOperationValues returns an array of possible values for the JSONWebKeyOperation const type. -func PossibleJSONWebKeyOperationValues() []JSONWebKeyOperation { - return []JSONWebKeyOperation{Decrypt, Encrypt, Sign, UnwrapKey, Verify, WrapKey} -} - -// JSONWebKeySignatureAlgorithm enumerates the values for json web key signature algorithm. -type JSONWebKeySignatureAlgorithm string - -const ( - // ES256 ECDSA using P-256 and SHA-256, as described in https://tools.ietf.org/html/rfc7518. - ES256 JSONWebKeySignatureAlgorithm = "ES256" - // ES256K ECDSA using P-256K and SHA-256, as described in https://tools.ietf.org/html/rfc7518 - ES256K JSONWebKeySignatureAlgorithm = "ES256K" - // ES384 ECDSA using P-384 and SHA-384, as described in https://tools.ietf.org/html/rfc7518 - ES384 JSONWebKeySignatureAlgorithm = "ES384" - // ES512 ECDSA using P-521 and SHA-512, as described in https://tools.ietf.org/html/rfc7518 - ES512 JSONWebKeySignatureAlgorithm = "ES512" - // PS256 RSASSA-PSS using SHA-256 and MGF1 with SHA-256, as described in - // https://tools.ietf.org/html/rfc7518 - PS256 JSONWebKeySignatureAlgorithm = "PS256" - // PS384 RSASSA-PSS using SHA-384 and MGF1 with SHA-384, as described in - // https://tools.ietf.org/html/rfc7518 - PS384 JSONWebKeySignatureAlgorithm = "PS384" - // PS512 RSASSA-PSS using SHA-512 and MGF1 with SHA-512, as described in - // https://tools.ietf.org/html/rfc7518 - PS512 JSONWebKeySignatureAlgorithm = "PS512" - // RS256 RSASSA-PKCS1-v1_5 using SHA-256, as described in https://tools.ietf.org/html/rfc7518 - RS256 JSONWebKeySignatureAlgorithm = "RS256" - // RS384 RSASSA-PKCS1-v1_5 using SHA-384, as described in https://tools.ietf.org/html/rfc7518 - RS384 JSONWebKeySignatureAlgorithm = "RS384" - // RS512 RSASSA-PKCS1-v1_5 using SHA-512, as described in https://tools.ietf.org/html/rfc7518 - RS512 JSONWebKeySignatureAlgorithm = "RS512" - // RSNULL Reserved - RSNULL JSONWebKeySignatureAlgorithm = "RSNULL" -) - -// PossibleJSONWebKeySignatureAlgorithmValues returns an array of possible values for the JSONWebKeySignatureAlgorithm const type. 
-func PossibleJSONWebKeySignatureAlgorithmValues() []JSONWebKeySignatureAlgorithm { - return []JSONWebKeySignatureAlgorithm{ES256, ES256K, ES384, ES512, PS256, PS384, PS512, RS256, RS384, RS512, RSNULL} -} - -// JSONWebKeyType enumerates the values for json web key type. -type JSONWebKeyType string - -const ( - // EC Elliptic Curve. - EC JSONWebKeyType = "EC" - // ECHSM Elliptic Curve with a private key which is not exportable from the HSM. - ECHSM JSONWebKeyType = "EC-HSM" - // Oct Not supported in this version. Octet sequence (used to represent symmetric keys) - Oct JSONWebKeyType = "oct" - // RSA RSA (https://tools.ietf.org/html/rfc3447) - RSA JSONWebKeyType = "RSA" - // RSAHSM RSA with a private key which is not exportable from the HSM. - RSAHSM JSONWebKeyType = "RSA-HSM" -) - -// PossibleJSONWebKeyTypeValues returns an array of possible values for the JSONWebKeyType const type. -func PossibleJSONWebKeyTypeValues() []JSONWebKeyType { - return []JSONWebKeyType{EC, ECHSM, Oct, RSA, RSAHSM} -} - -// KeyUsageType enumerates the values for key usage type. -type KeyUsageType string - -const ( - // CRLSign ... - CRLSign KeyUsageType = "cRLSign" - // DataEncipherment ... - DataEncipherment KeyUsageType = "dataEncipherment" - // DecipherOnly ... - DecipherOnly KeyUsageType = "decipherOnly" - // DigitalSignature ... - DigitalSignature KeyUsageType = "digitalSignature" - // EncipherOnly ... - EncipherOnly KeyUsageType = "encipherOnly" - // KeyAgreement ... - KeyAgreement KeyUsageType = "keyAgreement" - // KeyCertSign ... - KeyCertSign KeyUsageType = "keyCertSign" - // KeyEncipherment ... - KeyEncipherment KeyUsageType = "keyEncipherment" - // NonRepudiation ... - NonRepudiation KeyUsageType = "nonRepudiation" -) - -// PossibleKeyUsageTypeValues returns an array of possible values for the KeyUsageType const type. -func PossibleKeyUsageTypeValues() []KeyUsageType { - return []KeyUsageType{CRLSign, DataEncipherment, DecipherOnly, DigitalSignature, EncipherOnly, KeyAgreement, KeyCertSign, KeyEncipherment, NonRepudiation} -} - -// SasTokenType enumerates the values for sas token type. -type SasTokenType string - -const ( - // Account ... - Account SasTokenType = "account" - // Service ... - Service SasTokenType = "service" -) - -// PossibleSasTokenTypeValues returns an array of possible values for the SasTokenType const type. -func PossibleSasTokenTypeValues() []SasTokenType { - return []SasTokenType{Account, Service} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go deleted file mode 100644 index 19b0a74a48f..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/models.go +++ /dev/null @@ -1,3601 +0,0 @@ -package keyvault - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" - -// Action the action that will be executed. 
-type Action struct { - // ActionType - The type of the action. Possible values include: 'EmailContacts', 'AutoRenew' - ActionType ActionType `json:"action_type,omitempty"` -} - -// AdministratorDetails details of the organization administrator of the certificate issuer. -type AdministratorDetails struct { - // FirstName - First name. - FirstName *string `json:"first_name,omitempty"` - // LastName - Last name. - LastName *string `json:"last_name,omitempty"` - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Attributes the object attributes managed by the KeyVault service. -type Attributes struct { - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for Attributes. -func (a Attributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if a.Enabled != nil { - objectMap["enabled"] = a.Enabled - } - if a.NotBefore != nil { - objectMap["nbf"] = a.NotBefore - } - if a.Expires != nil { - objectMap["exp"] = a.Expires - } - return json.Marshal(objectMap) -} - -// BackupCertificateResult the backup certificate result, containing the backup blob. -type BackupCertificateResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up certificate. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupCertificateResult. -func (bcr BackupCertificateResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupKeyResult the backup key result, containing the backup blob. -type BackupKeyResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up key. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupKeyResult. -func (bkr BackupKeyResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupSecretResult the backup secret result, containing the backup blob. -type BackupSecretResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up secret. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupSecretResult. -func (bsr BackupSecretResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// BackupStorageResult the backup storage result, containing the backup blob. -type BackupStorageResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; The backup blob containing the backed up storage account. (a URL-encoded base64 string) - Value *string `json:"value,omitempty"` -} - -// MarshalJSON is the custom marshaler for BackupStorageResult. 
-func (bsr BackupStorageResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateAttributes the certificate management attributes. -type CertificateAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for certificates in the current vault. If it contains 'Purgeable', the certificate can be permanently deleted by a privileged user; otherwise, only the system can purge the certificate, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateAttributes. -func (ca CertificateAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ca.Enabled != nil { - objectMap["enabled"] = ca.Enabled - } - if ca.NotBefore != nil { - objectMap["nbf"] = ca.NotBefore - } - if ca.Expires != nil { - objectMap["exp"] = ca.Expires - } - return json.Marshal(objectMap) -} - -// CertificateBundle a certificate bundle consists of a certificate (X509) plus its attributes. -type CertificateBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateBundle. -func (cb CertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cb.Cer != nil { - objectMap["cer"] = cb.Cer - } - if cb.ContentType != nil { - objectMap["contentType"] = cb.ContentType - } - if cb.Attributes != nil { - objectMap["attributes"] = cb.Attributes - } - if cb.Tags != nil { - objectMap["tags"] = cb.Tags - } - return json.Marshal(objectMap) -} - -// CertificateCreateParameters the certificate create parameters. -type CertificateCreateParameters struct { - // CertificatePolicy - The management policy for the certificate. 
- CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateCreateParameters. -func (ccp CertificateCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ccp.CertificatePolicy != nil { - objectMap["policy"] = ccp.CertificatePolicy - } - if ccp.CertificateAttributes != nil { - objectMap["attributes"] = ccp.CertificateAttributes - } - if ccp.Tags != nil { - objectMap["tags"] = ccp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateImportParameters the certificate import parameters. -type CertificateImportParameters struct { - // Base64EncodedCertificate - A PEM file or a base64-encoded PFX file. PEM files need to contain the private key. - Base64EncodedCertificate *string `json:"value,omitempty"` - // Password - If the private key in base64EncodedCertificate is encrypted, the password used for encryption. - Password *string `json:"pwd,omitempty"` - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateImportParameters. -func (cip CertificateImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cip.Base64EncodedCertificate != nil { - objectMap["value"] = cip.Base64EncodedCertificate - } - if cip.Password != nil { - objectMap["pwd"] = cip.Password - } - if cip.CertificatePolicy != nil { - objectMap["policy"] = cip.CertificatePolicy - } - if cip.CertificateAttributes != nil { - objectMap["attributes"] = cip.CertificateAttributes - } - if cip.Tags != nil { - objectMap["tags"] = cip.Tags - } - return json.Marshal(objectMap) -} - -// CertificateIssuerItem the certificate issuer item containing certificate issuer metadata. -type CertificateIssuerItem struct { - // ID - Certificate Identifier. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` -} - -// CertificateIssuerListResult the certificate issuer list result. -type CertificateIssuerListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificate issuers in the key vault along with a link to the next page of certificate issuers. - Value *[]CertificateIssuerItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificate issuers. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateIssuerListResult. -func (cilr CertificateIssuerListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateIssuerListResultIterator provides access to a complete listing of CertificateIssuerItem -// values. -type CertificateIssuerListResultIterator struct { - i int - page CertificateIssuerListResultPage -} - -// NextWithContext advances to the next value. 
If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateIssuerListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateIssuerListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateIssuerListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateIssuerListResultIterator) Response() CertificateIssuerListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateIssuerListResultIterator) Value() CertificateIssuerItem { - if !iter.page.NotDone() { - return CertificateIssuerItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateIssuerListResultIterator type. -func NewCertificateIssuerListResultIterator(page CertificateIssuerListResultPage) CertificateIssuerListResultIterator { - return CertificateIssuerListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (cilr CertificateIssuerListResult) IsEmpty() bool { - return cilr.Value == nil || len(*cilr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (cilr CertificateIssuerListResult) hasNextLink() bool { - return cilr.NextLink != nil && len(*cilr.NextLink) != 0 -} - -// certificateIssuerListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (cilr CertificateIssuerListResult) certificateIssuerListResultPreparer(ctx context.Context) (*http.Request, error) { - if !cilr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(cilr.NextLink))) -} - -// CertificateIssuerListResultPage contains a page of CertificateIssuerItem values. -type CertificateIssuerListResultPage struct { - fn func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error) - cilr CertificateIssuerListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *CertificateIssuerListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateIssuerListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.cilr) - if err != nil { - return err - } - page.cilr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateIssuerListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateIssuerListResultPage) NotDone() bool { - return !page.cilr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateIssuerListResultPage) Response() CertificateIssuerListResult { - return page.cilr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateIssuerListResultPage) Values() []CertificateIssuerItem { - if page.cilr.IsEmpty() { - return nil - } - return *page.cilr.Value -} - -// Creates a new instance of the CertificateIssuerListResultPage type. -func NewCertificateIssuerListResultPage(cur CertificateIssuerListResult, getNextPage func(context.Context, CertificateIssuerListResult) (CertificateIssuerListResult, error)) CertificateIssuerListResultPage { - return CertificateIssuerListResultPage{ - fn: getNextPage, - cilr: cur, - } -} - -// CertificateIssuerSetParameters the certificate issuer set parameters. -type CertificateIssuerSetParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateIssuerUpdateParameters the certificate issuer update parameters. -type CertificateIssuerUpdateParameters struct { - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// CertificateItem the certificate item containing certificate metadata. -type CertificateItem struct { - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. 
(a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateItem. -func (ci CertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ci.ID != nil { - objectMap["id"] = ci.ID - } - if ci.Attributes != nil { - objectMap["attributes"] = ci.Attributes - } - if ci.Tags != nil { - objectMap["tags"] = ci.Tags - } - if ci.X509Thumbprint != nil { - objectMap["x5t"] = ci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// CertificateListResult the certificate list result. -type CertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of certificates in the key vault along with a link to the next page of certificates. - Value *[]CertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateListResult. -func (clr CertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// CertificateListResultIterator provides access to a complete listing of CertificateItem values. -type CertificateListResultIterator struct { - i int - page CertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *CertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *CertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter CertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter CertificateListResultIterator) Response() CertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter CertificateListResultIterator) Value() CertificateItem { - if !iter.page.NotDone() { - return CertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the CertificateListResultIterator type. -func NewCertificateListResultIterator(page CertificateListResultPage) CertificateListResultIterator { - return CertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. 
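Every *Iterator type removed in this file shares the contract shown above: NotDone reports whether enumeration can proceed, Value returns the current item, and NextWithContext advances, fetching the next page on demand. A minimal consumption sketch, assuming it lives in the same package as this models.go with the usual context and fmt imports; the client call that actually produces the iterator is not part of this diff:

func printCertificateIDs(ctx context.Context, iter CertificateListResultIterator) error {
	// The iterator is positioned on the first value as soon as it is built
	// from a non-empty page, so read before advancing.
	for iter.NotDone() {
		if item := iter.Value(); item.ID != nil {
			fmt.Println(*item.ID)
		}
		// NextWithContext walks within the current page and only issues a
		// request for the next page once the current one is exhausted.
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}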
-func (clr CertificateListResult) IsEmpty() bool { - return clr.Value == nil || len(*clr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (clr CertificateListResult) hasNextLink() bool { - return clr.NextLink != nil && len(*clr.NextLink) != 0 -} - -// certificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (clr CertificateListResult) certificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !clr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(clr.NextLink))) -} - -// CertificateListResultPage contains a page of CertificateItem values. -type CertificateListResultPage struct { - fn func(context.Context, CertificateListResult) (CertificateListResult, error) - clr CertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *CertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/CertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.clr) - if err != nil { - return err - } - page.clr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *CertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page CertificateListResultPage) NotDone() bool { - return !page.clr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page CertificateListResultPage) Response() CertificateListResult { - return page.clr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page CertificateListResultPage) Values() []CertificateItem { - if page.clr.IsEmpty() { - return nil - } - return *page.clr.Value -} - -// Creates a new instance of the CertificateListResultPage type. -func NewCertificateListResultPage(cur CertificateListResult, getNextPage func(context.Context, CertificateListResult) (CertificateListResult, error)) CertificateListResultPage { - return CertificateListResultPage{ - fn: getNextPage, - clr: cur, - } -} - -// CertificateMergeParameters the certificate merge parameters -type CertificateMergeParameters struct { - // X509Certificates - The certificate or the certificate chain to merge. - X509Certificates *[][]byte `json:"x5c,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateMergeParameters. 
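The *Page types offer a coarser alternative: Values returns an entire page and NextWithContext advances page by page. Note the for-loop inside the generated NextWithContext above: it intentionally skips intermediate pages that are empty yet still carry a nextLink, stopping only on a non-empty page or the final one. A page-level sketch under the same in-package assumption:

func collectCertificates(ctx context.Context, page CertificateListResultPage) ([]CertificateItem, error) {
	var all []CertificateItem
	for page.NotDone() {
		// Values returns the whole current page (nil when empty).
		all = append(all, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}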
-func (cmp CertificateMergeParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cmp.X509Certificates != nil { - objectMap["x5c"] = cmp.X509Certificates - } - if cmp.CertificateAttributes != nil { - objectMap["attributes"] = cmp.CertificateAttributes - } - if cmp.Tags != nil { - objectMap["tags"] = cmp.Tags - } - return json.Marshal(objectMap) -} - -// CertificateOperation a certificate operation is returned in case of asynchronous requests. -type CertificateOperation struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Csr - The certificate signing request (CSR) that is being used in the certificate operation. - Csr *[]byte `json:"csr,omitempty"` - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` - // Status - Status of the certificate operation. - Status *string `json:"status,omitempty"` - // StatusDetails - The status details of the certificate operation. - StatusDetails *string `json:"status_details,omitempty"` - // Error - Error encountered, if any, during the certificate operation. - Error *Error `json:"error,omitempty"` - // Target - Location which contains the result of the certificate operation. - Target *string `json:"target,omitempty"` - // RequestID - Identifier for the certificate operation. - RequestID *string `json:"request_id,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificateOperation. -func (co CertificateOperation) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if co.IssuerParameters != nil { - objectMap["issuer"] = co.IssuerParameters - } - if co.Csr != nil { - objectMap["csr"] = co.Csr - } - if co.CancellationRequested != nil { - objectMap["cancellation_requested"] = co.CancellationRequested - } - if co.Status != nil { - objectMap["status"] = co.Status - } - if co.StatusDetails != nil { - objectMap["status_details"] = co.StatusDetails - } - if co.Error != nil { - objectMap["error"] = co.Error - } - if co.Target != nil { - objectMap["target"] = co.Target - } - if co.RequestID != nil { - objectMap["request_id"] = co.RequestID - } - return json.Marshal(objectMap) -} - -// CertificateOperationUpdateParameter the certificate operation update parameters. -type CertificateOperationUpdateParameter struct { - // CancellationRequested - Indicates if cancellation was requested on the certificate operation. - CancellationRequested *bool `json:"cancellation_requested,omitempty"` -} - -// CertificatePolicy management policy for a certificate. -type CertificatePolicy struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // KeyProperties - Properties of the key backing a certificate. - KeyProperties *KeyProperties `json:"key_props,omitempty"` - // SecretProperties - Properties of the secret backing a certificate. - SecretProperties *SecretProperties `json:"secret_props,omitempty"` - // X509CertificateProperties - Properties of the X509 component of a certificate. - X509CertificateProperties *X509CertificateProperties `json:"x509_props,omitempty"` - // LifetimeActions - Actions that will be performed by Key Vault over the lifetime of a certificate. 
- LifetimeActions *[]LifetimeAction `json:"lifetime_actions,omitempty"` - // IssuerParameters - Parameters for the issuer of the X509 component of a certificate. - IssuerParameters *IssuerParameters `json:"issuer,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for CertificatePolicy. -func (cp CertificatePolicy) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cp.KeyProperties != nil { - objectMap["key_props"] = cp.KeyProperties - } - if cp.SecretProperties != nil { - objectMap["secret_props"] = cp.SecretProperties - } - if cp.X509CertificateProperties != nil { - objectMap["x509_props"] = cp.X509CertificateProperties - } - if cp.LifetimeActions != nil { - objectMap["lifetime_actions"] = cp.LifetimeActions - } - if cp.IssuerParameters != nil { - objectMap["issuer"] = cp.IssuerParameters - } - if cp.Attributes != nil { - objectMap["attributes"] = cp.Attributes - } - return json.Marshal(objectMap) -} - -// CertificateRestoreParameters the certificate restore parameters. -type CertificateRestoreParameters struct { - // CertificateBundleBackup - The backup blob associated with a certificate bundle. (a URL-encoded base64 string) - CertificateBundleBackup *string `json:"value,omitempty"` -} - -// CertificateUpdateParameters the certificate update parameters. -type CertificateUpdateParameters struct { - // CertificatePolicy - The management policy for the certificate. - CertificatePolicy *CertificatePolicy `json:"policy,omitempty"` - // CertificateAttributes - The attributes of the certificate (optional). - CertificateAttributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for CertificateUpdateParameters. -func (cup CertificateUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if cup.CertificatePolicy != nil { - objectMap["policy"] = cup.CertificatePolicy - } - if cup.CertificateAttributes != nil { - objectMap["attributes"] = cup.CertificateAttributes - } - if cup.Tags != nil { - objectMap["tags"] = cup.Tags - } - return json.Marshal(objectMap) -} - -// Contact the contact information for the vault certificates. -type Contact struct { - // EmailAddress - Email address. - EmailAddress *string `json:"email,omitempty"` - // Name - Name. - Name *string `json:"name,omitempty"` - // Phone - Phone number. - Phone *string `json:"phone,omitempty"` -} - -// Contacts the contacts for the vault certificates. -type Contacts struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the contacts collection. - ID *string `json:"id,omitempty"` - // ContactList - The contact list for the vault certificates. - ContactList *[]Contact `json:"contacts,omitempty"` -} - -// MarshalJSON is the custom marshaler for Contacts. -func (c Contacts) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if c.ContactList != nil { - objectMap["contacts"] = c.ContactList - } - return json.Marshal(objectMap) -} - -// DeletedCertificateBundle a Deleted Certificate consisting of its previous id, attributes and its tags, -// as well as information on when it will be purged. 
-type DeletedCertificateBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The certificate id. - ID *string `json:"id,omitempty"` - // Kid - READ-ONLY; The key id. - Kid *string `json:"kid,omitempty"` - // Sid - READ-ONLY; The secret id. - Sid *string `json:"sid,omitempty"` - // X509Thumbprint - READ-ONLY; Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` - // Policy - READ-ONLY; The management policy. - Policy *CertificatePolicy `json:"policy,omitempty"` - // Cer - CER contents of x509 certificate. - Cer *[]byte `json:"cer,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The certificate attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateBundle. -func (dcb DeletedCertificateBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dcb.RecoveryID != nil { - objectMap["recoveryId"] = dcb.RecoveryID - } - if dcb.Cer != nil { - objectMap["cer"] = dcb.Cer - } - if dcb.ContentType != nil { - objectMap["contentType"] = dcb.ContentType - } - if dcb.Attributes != nil { - objectMap["attributes"] = dcb.Attributes - } - if dcb.Tags != nil { - objectMap["tags"] = dcb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedCertificateItem the deleted certificate item containing metadata about the deleted certificate. -type DeletedCertificateItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted certificate. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the certificate is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the certificate was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Certificate identifier. - ID *string `json:"id,omitempty"` - // Attributes - The certificate management attributes. - Attributes *CertificateAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // X509Thumbprint - Thumbprint of the certificate. (a URL-encoded base64 string) - X509Thumbprint *string `json:"x5t,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateItem. 
-func (dci DeletedCertificateItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dci.RecoveryID != nil { - objectMap["recoveryId"] = dci.RecoveryID - } - if dci.ID != nil { - objectMap["id"] = dci.ID - } - if dci.Attributes != nil { - objectMap["attributes"] = dci.Attributes - } - if dci.Tags != nil { - objectMap["tags"] = dci.Tags - } - if dci.X509Thumbprint != nil { - objectMap["x5t"] = dci.X509Thumbprint - } - return json.Marshal(objectMap) -} - -// DeletedCertificateListResult a list of certificates that have been deleted in this vault. -type DeletedCertificateListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted certificates in the vault along with a link to the next page of deleted certificates - Value *[]DeletedCertificateItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted certificates. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedCertificateListResult. -func (dclr DeletedCertificateListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedCertificateListResultIterator provides access to a complete listing of DeletedCertificateItem -// values. -type DeletedCertificateListResultIterator struct { - i int - page DeletedCertificateListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedCertificateListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedCertificateListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedCertificateListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedCertificateListResultIterator) Response() DeletedCertificateListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedCertificateListResultIterator) Value() DeletedCertificateItem { - if !iter.page.NotDone() { - return DeletedCertificateItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedCertificateListResultIterator type. 
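The custom MarshalJSON methods throughout this file exist for one reason: fields documented as READ-ONLY are never copied into the object map, so they are dropped from request payloads even when populated locally. A sketch under the same in-package assumption; the vault URL is a hypothetical placeholder:

func marshalDeletedItem() ([]byte, error) {
	id := "https://myvault.vault.azure.net/certificates/demo" // hypothetical identifier
	item := DeletedCertificateItem{ID: &id}
	// Only non-nil writable fields reach the object map; scheduledPurgeDate
	// and deletedDate are READ-ONLY and are never emitted by MarshalJSON.
	return json.Marshal(item)
}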
-func NewDeletedCertificateListResultIterator(page DeletedCertificateListResultPage) DeletedCertificateListResultIterator { - return DeletedCertificateListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dclr DeletedCertificateListResult) IsEmpty() bool { - return dclr.Value == nil || len(*dclr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dclr DeletedCertificateListResult) hasNextLink() bool { - return dclr.NextLink != nil && len(*dclr.NextLink) != 0 -} - -// deletedCertificateListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dclr DeletedCertificateListResult) deletedCertificateListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dclr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dclr.NextLink))) -} - -// DeletedCertificateListResultPage contains a page of DeletedCertificateItem values. -type DeletedCertificateListResultPage struct { - fn func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error) - dclr DeletedCertificateListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedCertificateListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedCertificateListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dclr) - if err != nil { - return err - } - page.dclr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedCertificateListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedCertificateListResultPage) NotDone() bool { - return !page.dclr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedCertificateListResultPage) Response() DeletedCertificateListResult { - return page.dclr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedCertificateListResultPage) Values() []DeletedCertificateItem { - if page.dclr.IsEmpty() { - return nil - } - return *page.dclr.Value -} - -// Creates a new instance of the DeletedCertificateListResultPage type. 
-func NewDeletedCertificateListResultPage(cur DeletedCertificateListResult, getNextPage func(context.Context, DeletedCertificateListResult) (DeletedCertificateListResult, error)) DeletedCertificateListResultPage { - return DeletedCertificateListResultPage{ - fn: getNextPage, - dclr: cur, - } -} - -// DeletedKeyBundle a DeletedKeyBundle consisting of a WebKey plus its Attributes and deletion info -type DeletedKeyBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyBundle. -func (dkb DeletedKeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dkb.RecoveryID != nil { - objectMap["recoveryId"] = dkb.RecoveryID - } - if dkb.Key != nil { - objectMap["key"] = dkb.Key - } - if dkb.Attributes != nil { - objectMap["attributes"] = dkb.Attributes - } - if dkb.Tags != nil { - objectMap["tags"] = dkb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyItem the deleted key item containing the deleted key metadata and information about deletion. -type DeletedKeyItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted key. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the key is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the key was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyItem. -func (dki DeletedKeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dki.RecoveryID != nil { - objectMap["recoveryId"] = dki.RecoveryID - } - if dki.Kid != nil { - objectMap["kid"] = dki.Kid - } - if dki.Attributes != nil { - objectMap["attributes"] = dki.Attributes - } - if dki.Tags != nil { - objectMap["tags"] = dki.Tags - } - return json.Marshal(objectMap) -} - -// DeletedKeyListResult a list of keys that have been deleted in this vault. 
-type DeletedKeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of deleted keys in the vault along with a link to the next page of deleted keys - Value *[]DeletedKeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedKeyListResult. -func (dklr DeletedKeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedKeyListResultIterator provides access to a complete listing of DeletedKeyItem values. -type DeletedKeyListResultIterator struct { - i int - page DeletedKeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedKeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedKeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedKeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedKeyListResultIterator) Response() DeletedKeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedKeyListResultIterator) Value() DeletedKeyItem { - if !iter.page.NotDone() { - return DeletedKeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedKeyListResultIterator type. -func NewDeletedKeyListResultIterator(page DeletedKeyListResultPage) DeletedKeyListResultIterator { - return DeletedKeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dklr DeletedKeyListResult) IsEmpty() bool { - return dklr.Value == nil || len(*dklr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dklr DeletedKeyListResult) hasNextLink() bool { - return dklr.NextLink != nil && len(*dklr.NextLink) != 0 -} - -// deletedKeyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (dklr DeletedKeyListResult) deletedKeyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dklr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dklr.NextLink))) -} - -// DeletedKeyListResultPage contains a page of DeletedKeyItem values. -type DeletedKeyListResultPage struct { - fn func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error) - dklr DeletedKeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedKeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedKeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dklr) - if err != nil { - return err - } - page.dklr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedKeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedKeyListResultPage) NotDone() bool { - return !page.dklr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedKeyListResultPage) Response() DeletedKeyListResult { - return page.dklr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedKeyListResultPage) Values() []DeletedKeyItem { - if page.dklr.IsEmpty() { - return nil - } - return *page.dklr.Value -} - -// Creates a new instance of the DeletedKeyListResultPage type. -func NewDeletedKeyListResultPage(cur DeletedKeyListResult, getNextPage func(context.Context, DeletedKeyListResult) (DeletedKeyListResult, error)) DeletedKeyListResultPage { - return DeletedKeyListResultPage{ - fn: getNextPage, - dklr: cur, - } -} - -// DeletedSasDefinitionBundle a deleted SAS definition bundle consisting of its previous id, attributes and -// its tags, as well as information on when it will be purged. -type DeletedSasDefinitionBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. 
- TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionBundle. -func (dsdb DeletedSasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdb.RecoveryID != nil { - objectMap["recoveryId"] = dsdb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionItem the deleted SAS definition item containing metadata about the deleted SAS -// definition. -type DeletedSasDefinitionItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted SAS definition. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the SAS definition is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the SAS definition was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionItem. -func (dsdi DeletedSasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsdi.RecoveryID != nil { - objectMap["recoveryId"] = dsdi.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResult the deleted SAS definition list result -type DeletedSasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted SAS definitions in the vault along with a link to the next page of deleted sas definitions - Value *[]DeletedSasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSasDefinitionListResult. -func (dsdlr DeletedSasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSasDefinitionListResultIterator provides access to a complete listing of DeletedSasDefinitionItem -// values. -type DeletedSasDefinitionListResultIterator struct { - i int - page DeletedSasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSasDefinitionListResultIterator) Response() DeletedSasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSasDefinitionListResultIterator) Value() DeletedSasDefinitionItem { - if !iter.page.NotDone() { - return DeletedSasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSasDefinitionListResultIterator type. -func NewDeletedSasDefinitionListResultIterator(page DeletedSasDefinitionListResultPage) DeletedSasDefinitionListResultIterator { - return DeletedSasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dsdlr DeletedSasDefinitionListResult) IsEmpty() bool { - return dsdlr.Value == nil || len(*dsdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dsdlr DeletedSasDefinitionListResult) hasNextLink() bool { - return dsdlr.NextLink != nil && len(*dsdlr.NextLink) != 0 -} - -// deletedSasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dsdlr DeletedSasDefinitionListResult) deletedSasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dsdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dsdlr.NextLink))) -} - -// DeletedSasDefinitionListResultPage contains a page of DeletedSasDefinitionItem values. -type DeletedSasDefinitionListResultPage struct { - fn func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error) - dsdlr DeletedSasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dsdlr) - if err != nil { - return err - } - page.dsdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSasDefinitionListResultPage) NotDone() bool { - return !page.dsdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSasDefinitionListResultPage) Response() DeletedSasDefinitionListResult { - return page.dsdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSasDefinitionListResultPage) Values() []DeletedSasDefinitionItem { - if page.dsdlr.IsEmpty() { - return nil - } - return *page.dsdlr.Value -} - -// Creates a new instance of the DeletedSasDefinitionListResultPage type. -func NewDeletedSasDefinitionListResultPage(cur DeletedSasDefinitionListResult, getNextPage func(context.Context, DeletedSasDefinitionListResult) (DeletedSasDefinitionListResult, error)) DeletedSasDefinitionListResultPage { - return DeletedSasDefinitionListResultPage{ - fn: getNextPage, - dsdlr: cur, - } -} - -// DeletedSecretBundle a Deleted Secret consisting of its previous id, attributes and its tags, as well as -// information on when it will be purged. -type DeletedSecretBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretBundle. 
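The Deleted*Bundle types above all pair a writable RecoveryID with READ-ONLY deletion timestamps typed as date.UnixTime, which go-autorest declares as a time.Time and serializes as seconds since the epoch. A small sketch of how a caller might surface the purge deadline, assuming the usual fmt and time imports:

func describeDeletion(dsb DeletedSecretBundle) string {
	if dsb.ScheduledPurgeDate == nil {
		return "no purge scheduled"
	}
	// date.UnixTime is declared as time.Time, so a plain type conversion
	// recovers the timestamp.
	purge := time.Time(*dsb.ScheduledPurgeDate)
	return fmt.Sprintf("scheduled purge in %s", time.Until(purge).Round(time.Hour))
}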
-func (dsb DeletedSecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - if dsb.Value != nil { - objectMap["value"] = dsb.Value - } - if dsb.ID != nil { - objectMap["id"] = dsb.ID - } - if dsb.ContentType != nil { - objectMap["contentType"] = dsb.ContentType - } - if dsb.Attributes != nil { - objectMap["attributes"] = dsb.Attributes - } - if dsb.Tags != nil { - objectMap["tags"] = dsb.Tags - } - return json.Marshal(objectMap) -} - -// DeletedSecretItem the deleted secret item containing metadata about the deleted secret. -type DeletedSecretItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted secret. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the secret is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the secret was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretItem. -func (dsi DeletedSecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsi.RecoveryID != nil { - objectMap["recoveryId"] = dsi.RecoveryID - } - if dsi.ID != nil { - objectMap["id"] = dsi.ID - } - if dsi.Attributes != nil { - objectMap["attributes"] = dsi.Attributes - } - if dsi.Tags != nil { - objectMap["tags"] = dsi.Tags - } - if dsi.ContentType != nil { - objectMap["contentType"] = dsi.ContentType - } - return json.Marshal(objectMap) -} - -// DeletedSecretListResult the deleted secret list result -type DeletedSecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted secrets in the vault along with a link to the next page of deleted secrets - Value *[]DeletedSecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedSecretListResult. -func (dslr DeletedSecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedSecretListResultIterator provides access to a complete listing of DeletedSecretItem values. -type DeletedSecretListResultIterator struct { - i int - page DeletedSecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. 
-func (iter *DeletedSecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *DeletedSecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedSecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedSecretListResultIterator) Response() DeletedSecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedSecretListResultIterator) Value() DeletedSecretItem { - if !iter.page.NotDone() { - return DeletedSecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedSecretListResultIterator type. -func NewDeletedSecretListResultIterator(page DeletedSecretListResultPage) DeletedSecretListResultIterator { - return DeletedSecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedSecretListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedSecretListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedSecretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedSecretListResult) deletedSecretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedSecretListResultPage contains a page of DeletedSecretItem values. -type DeletedSecretListResultPage struct { - fn func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error) - dslr DeletedSecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. 
-func (page *DeletedSecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedSecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedSecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedSecretListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page DeletedSecretListResultPage) Response() DeletedSecretListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedSecretListResultPage) Values() []DeletedSecretItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedSecretListResultPage type. -func NewDeletedSecretListResultPage(cur DeletedSecretListResult, getNextPage func(context.Context, DeletedSecretListResult) (DeletedSecretListResult, error)) DeletedSecretListResultPage { - return DeletedSecretListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// DeletedStorageAccountItem the deleted storage account item containing metadata about the deleted storage -// account. -type DeletedStorageAccountItem struct { - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. - RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; Storage identifier. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; Storage account resource Id. - ResourceID *string `json:"resourceId,omitempty"` - // Attributes - READ-ONLY; The storage account management attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageAccountItem. -func (dsai DeletedStorageAccountItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsai.RecoveryID != nil { - objectMap["recoveryId"] = dsai.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageBundle a deleted storage account bundle consisting of its previous id, attributes and its -// tags, as well as information on when it will be purged. -type DeletedStorageBundle struct { - autorest.Response `json:"-"` - // RecoveryID - The url of the recovery object, used to identify and recover the deleted storage account. 
- RecoveryID *string `json:"recoveryId,omitempty"` - // ScheduledPurgeDate - READ-ONLY; The time when the storage account is scheduled to be purged, in UTC - ScheduledPurgeDate *date.UnixTime `json:"scheduledPurgeDate,omitempty"` - // DeletedDate - READ-ONLY; The time when the storage account was deleted, in UTC - DeletedDate *date.UnixTime `json:"deletedDate,omitempty"` - // ID - READ-ONLY; The storage account id. - ID *string `json:"id,omitempty"` - // ResourceID - READ-ONLY; The storage account resource id. - ResourceID *string `json:"resourceId,omitempty"` - // ActiveKeyName - READ-ONLY; The current active storage account key name. - ActiveKeyName *string `json:"activeKeyName,omitempty"` - // AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user. - AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"` - // RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format. - RegenerationPeriod *string `json:"regenerationPeriod,omitempty"` - // Attributes - READ-ONLY; The storage account attributes. - Attributes *StorageAccountAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageBundle. -func (dsb DeletedStorageBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsb.RecoveryID != nil { - objectMap["recoveryId"] = dsb.RecoveryID - } - return json.Marshal(objectMap) -} - -// DeletedStorageListResult the deleted storage account list result -type DeletedStorageListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of the deleted storage accounts in the vault along with a link to the next page of deleted storage accounts - Value *[]DeletedStorageAccountItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of deleted storage accounts. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for DeletedStorageListResult. -func (dslr DeletedStorageListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// DeletedStorageListResultIterator provides access to a complete listing of DeletedStorageAccountItem -// values. -type DeletedStorageListResultIterator struct { - i int - page DeletedStorageListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *DeletedStorageListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. 
-func (iter *DeletedStorageListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter DeletedStorageListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter DeletedStorageListResultIterator) Response() DeletedStorageListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter DeletedStorageListResultIterator) Value() DeletedStorageAccountItem { - if !iter.page.NotDone() { - return DeletedStorageAccountItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the DeletedStorageListResultIterator type. -func NewDeletedStorageListResultIterator(page DeletedStorageListResultPage) DeletedStorageListResultIterator { - return DeletedStorageListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (dslr DeletedStorageListResult) IsEmpty() bool { - return dslr.Value == nil || len(*dslr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (dslr DeletedStorageListResult) hasNextLink() bool { - return dslr.NextLink != nil && len(*dslr.NextLink) != 0 -} - -// deletedStorageListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (dslr DeletedStorageListResult) deletedStorageListResultPreparer(ctx context.Context) (*http.Request, error) { - if !dslr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(dslr.NextLink))) -} - -// DeletedStorageListResultPage contains a page of DeletedStorageAccountItem values. -type DeletedStorageListResultPage struct { - fn func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error) - dslr DeletedStorageListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *DeletedStorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DeletedStorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.dslr) - if err != nil { - return err - } - page.dslr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *DeletedStorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page DeletedStorageListResultPage) NotDone() bool { - return !page.dslr.IsEmpty() -} - -// Response returns the raw server response from the last page request. 
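The iterator and page types above all implement the same autorest enumeration contract: NotDone reports whether values remain, Value yields the current item, and NextWithContext advances, transparently fetching the next page when the current one is exhausted. A minimal consumption sketch; the import path is the assumed vendored location and the iterator is presumed to come from one of the package's client helpers:

    import (
    	"context"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    )

    // drainDeletedStorage walks every page of a deleted-storage listing and
    // collects item IDs; NextWithContext fetches further pages on demand, so
    // the caller never touches NextLink directly.
    func drainDeletedStorage(ctx context.Context, iter keyvault.DeletedStorageListResultIterator) ([]string, error) {
    	var ids []string
    	for iter.NotDone() {
    		if item := iter.Value(); item.ID != nil {
    			ids = append(ids, *item.ID)
    		}
    		if err := iter.NextWithContext(ctx); err != nil {
    			return nil, err
    		}
    	}
    	return ids, nil
    }

The page type can be driven the same way when whole-page access via Values() is wanted instead of per-item access.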
-func (page DeletedStorageListResultPage) Response() DeletedStorageListResult { - return page.dslr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page DeletedStorageListResultPage) Values() []DeletedStorageAccountItem { - if page.dslr.IsEmpty() { - return nil - } - return *page.dslr.Value -} - -// Creates a new instance of the DeletedStorageListResultPage type. -func NewDeletedStorageListResultPage(cur DeletedStorageListResult, getNextPage func(context.Context, DeletedStorageListResult) (DeletedStorageListResult, error)) DeletedStorageListResultPage { - return DeletedStorageListResultPage{ - fn: getNextPage, - dslr: cur, - } -} - -// Error the key vault server error. -type Error struct { - // Code - READ-ONLY; The error code. - Code *string `json:"code,omitempty"` - // Message - READ-ONLY; The error message. - Message *string `json:"message,omitempty"` - // InnerError - READ-ONLY - InnerError *Error `json:"innererror,omitempty"` -} - -// MarshalJSON is the custom marshaler for Error. -func (e Error) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// ErrorType the key vault error exception. -type ErrorType struct { - // Error - READ-ONLY - Error *Error `json:"error,omitempty"` -} - -// MarshalJSON is the custom marshaler for ErrorType. -func (et ErrorType) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// IssuerAttributes the attributes of an issuer managed by the Key Vault service. -type IssuerAttributes struct { - // Enabled - Determines whether the issuer is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerAttributes. -func (ia IssuerAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ia.Enabled != nil { - objectMap["enabled"] = ia.Enabled - } - return json.Marshal(objectMap) -} - -// IssuerBundle the issuer for Key Vault certificate. -type IssuerBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; Identifier for the issuer object. - ID *string `json:"id,omitempty"` - // Provider - The issuer provider. - Provider *string `json:"provider,omitempty"` - // Credentials - The credentials to be used for the issuer. - Credentials *IssuerCredentials `json:"credentials,omitempty"` - // OrganizationDetails - Details of the organization as provided to the issuer. - OrganizationDetails *OrganizationDetails `json:"org_details,omitempty"` - // Attributes - Attributes of the issuer object. - Attributes *IssuerAttributes `json:"attributes,omitempty"` -} - -// MarshalJSON is the custom marshaler for IssuerBundle. -func (ib IssuerBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ib.Provider != nil { - objectMap["provider"] = ib.Provider - } - if ib.Credentials != nil { - objectMap["credentials"] = ib.Credentials - } - if ib.OrganizationDetails != nil { - objectMap["org_details"] = ib.OrganizationDetails - } - if ib.Attributes != nil { - objectMap["attributes"] = ib.Attributes - } - return json.Marshal(objectMap) -} - -// IssuerCredentials the credentials to be used for the certificate issuer. 
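A pattern worth noting in the marshalers above: every generated MarshalJSON builds an objectMap holding only the writable fields, so READ-ONLY properties (Created, Updated, the whole Error type) are never echoed back to the service on writes. A small round-trip sketch, again assuming the vendored import path:

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"time"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    	"github.com/Azure/go-autorest/autorest/date"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    func main() {
    	created := date.UnixTime(time.Now())
    	attrs := keyvault.IssuerAttributes{
    		Enabled: to.BoolPtr(true),
    		Created: &created, // READ-ONLY: the generated marshaler drops it
    	}
    	b, _ := json.Marshal(attrs) // invokes the custom MarshalJSON above
    	fmt.Println(string(b))      // prints {"enabled":true}
    }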
-type IssuerCredentials struct { - // AccountID - The user name/account name/account id. - AccountID *string `json:"account_id,omitempty"` - // Password - The password/secret/account key. - Password *string `json:"pwd,omitempty"` -} - -// IssuerParameters parameters for the issuer of the X509 component of a certificate. -type IssuerParameters struct { - // Name - Name of the referenced issuer object or reserved names; for example, 'Self' or 'Unknown'. - Name *string `json:"name,omitempty"` - // CertificateType - Certificate type as supported by the provider (optional); for example 'OV-SSL', 'EV-SSL' - CertificateType *string `json:"cty,omitempty"` - // CertificateTransparency - Indicates if the certificates generated under this policy should be published to certificate transparency logs. - CertificateTransparency *bool `json:"cert_transparency,omitempty"` -} - -// JSONWebKey as of http://tools.ietf.org/html/draft-ietf-jose-json-web-key-18 -type JSONWebKey struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Kty - JsonWebKey Key Type (kty), as defined in https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - KeyOps *[]string `json:"key_ops,omitempty"` - // N - RSA modulus. (a URL-encoded base64 string) - N *string `json:"n,omitempty"` - // E - RSA public exponent. (a URL-encoded base64 string) - E *string `json:"e,omitempty"` - // D - RSA private exponent, or the D component of an EC private key. (a URL-encoded base64 string) - D *string `json:"d,omitempty"` - // DP - RSA private key parameter. (a URL-encoded base64 string) - DP *string `json:"dp,omitempty"` - // DQ - RSA private key parameter. (a URL-encoded base64 string) - DQ *string `json:"dq,omitempty"` - // QI - RSA private key parameter. (a URL-encoded base64 string) - QI *string `json:"qi,omitempty"` - // P - RSA secret prime. (a URL-encoded base64 string) - P *string `json:"p,omitempty"` - // Q - RSA secret prime, with p < q. (a URL-encoded base64 string) - Q *string `json:"q,omitempty"` - // K - Symmetric key. (a URL-encoded base64 string) - K *string `json:"k,omitempty"` - // T - HSM Token, used with 'Bring Your Own Key'. (a URL-encoded base64 string) - T *string `json:"key_hsm,omitempty"` - // Crv - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Crv JSONWebKeyCurveName `json:"crv,omitempty"` - // X - X component of an EC public key. (a URL-encoded base64 string) - X *string `json:"x,omitempty"` - // Y - Y component of an EC public key. (a URL-encoded base64 string) - Y *string `json:"y,omitempty"` -} - -// KeyAttributes the attributes of a key managed by the key vault service. -type KeyAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for keys in the current vault. If it contains 'Purgeable' the key can be permanently deleted by a privileged user; otherwise, only the system can purge the key, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. 
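The JSONWebKey type above carries RSA parameters as unpadded, URL-safe base64 strings (the JOSE convention its field comments call "a URL-encoded base64 string"). A sketch of rebuilding a crypto/rsa public key from the N and E fields; the helper name is illustrative:

    import (
    	"crypto/rsa"
    	"encoding/base64"
    	"errors"
    	"math/big"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    )

    // rsaPublicKeyFromJWK rebuilds a crypto/rsa public key from the unpadded
    // base64url N (modulus) and E (public exponent) fields.
    func rsaPublicKeyFromJWK(key keyvault.JSONWebKey) (*rsa.PublicKey, error) {
    	if key.N == nil || key.E == nil {
    		return nil, errors.New("jwk: missing modulus or exponent")
    	}
    	n, err := base64.RawURLEncoding.DecodeString(*key.N)
    	if err != nil {
    		return nil, err
    	}
    	e, err := base64.RawURLEncoding.DecodeString(*key.E)
    	if err != nil {
    		return nil, err
    	}
    	return &rsa.PublicKey{
    		N: new(big.Int).SetBytes(n),
    		E: int(new(big.Int).SetBytes(e).Int64()),
    	}, nil
    }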
- NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyAttributes. -func (ka KeyAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ka.Enabled != nil { - objectMap["enabled"] = ka.Enabled - } - if ka.NotBefore != nil { - objectMap["nbf"] = ka.NotBefore - } - if ka.Expires != nil { - objectMap["exp"] = ka.Expires - } - return json.Marshal(objectMap) -} - -// KeyBundle a KeyBundle consisting of a WebKey plus its attributes. -type KeyBundle struct { - autorest.Response `json:"-"` - // Key - The Json web key. - Key *JSONWebKey `json:"key,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyBundle. -func (kb KeyBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kb.Key != nil { - objectMap["key"] = kb.Key - } - if kb.Attributes != nil { - objectMap["attributes"] = kb.Attributes - } - if kb.Tags != nil { - objectMap["tags"] = kb.Tags - } - return json.Marshal(objectMap) -} - -// KeyCreateParameters the key create parameters. -type KeyCreateParameters struct { - // Kty - The type of key to create. For valid values, see JsonWebKeyType. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct' - Kty JSONWebKeyType `json:"kty,omitempty"` - // KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA. - KeySize *int32 `json:"key_size,omitempty"` - KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"` - KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K' - Curve JSONWebKeyCurveName `json:"crv,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyCreateParameters. -func (kcp KeyCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kcp.Kty != "" { - objectMap["kty"] = kcp.Kty - } - if kcp.KeySize != nil { - objectMap["key_size"] = kcp.KeySize - } - if kcp.KeyOps != nil { - objectMap["key_ops"] = kcp.KeyOps - } - if kcp.KeyAttributes != nil { - objectMap["attributes"] = kcp.KeyAttributes - } - if kcp.Tags != nil { - objectMap["tags"] = kcp.Tags - } - if kcp.Curve != "" { - objectMap["crv"] = kcp.Curve - } - return json.Marshal(objectMap) -} - -// KeyImportParameters the key import parameters. -type KeyImportParameters struct { - // Hsm - Whether to import as a hardware key (HSM) or software key. - Hsm *bool `json:"Hsm,omitempty"` - // Key - The Json web key - Key *JSONWebKey `json:"key,omitempty"` - // KeyAttributes - The key management attributes. 
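KeyCreateParameters above pairs string-typed enums with pointer fields so that omitempty and the custom marshaler can elide anything unset. A sketch of the request body for a 2048-bit RSA signing key; string conversions stand in for the package's generated constants so the sketch does not guess their exact names:

    import (
    	"encoding/json"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // rsaSigningKeyBody builds a create-key request body for a 2048-bit RSA
    // key limited to sign/verify; unset pointer fields are elided.
    func rsaSigningKeyBody() ([]byte, error) {
    	params := keyvault.KeyCreateParameters{
    		Kty:     keyvault.JSONWebKeyType("RSA"), // literal conversion; constant names not assumed
    		KeySize: to.Int32Ptr(2048),
    		KeyOps:  &[]keyvault.JSONWebKeyOperation{"sign", "verify"},
    	}
    	// map keys marshal sorted: {"key_ops":["sign","verify"],"key_size":2048,"kty":"RSA"}
    	return json.Marshal(params)
    }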
- KeyAttributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for KeyImportParameters. -func (kip KeyImportParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if kip.Hsm != nil { - objectMap["Hsm"] = kip.Hsm - } - if kip.Key != nil { - objectMap["key"] = kip.Key - } - if kip.KeyAttributes != nil { - objectMap["attributes"] = kip.KeyAttributes - } - if kip.Tags != nil { - objectMap["tags"] = kip.Tags - } - return json.Marshal(objectMap) -} - -// KeyItem the key item containing key metadata. -type KeyItem struct { - // Kid - Key identifier. - Kid *string `json:"kid,omitempty"` - // Attributes - The key management attributes. - Attributes *KeyAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Managed - READ-ONLY; True if the key's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyItem. -func (ki KeyItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ki.Kid != nil { - objectMap["kid"] = ki.Kid - } - if ki.Attributes != nil { - objectMap["attributes"] = ki.Attributes - } - if ki.Tags != nil { - objectMap["tags"] = ki.Tags - } - return json.Marshal(objectMap) -} - -// KeyListResult the key list result. -type KeyListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of keys in the key vault along with a link to the next page of keys. - Value *[]KeyItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of keys. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for KeyListResult. -func (klr KeyListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// KeyListResultIterator provides access to a complete listing of KeyItem values. -type KeyListResultIterator struct { - i int - page KeyListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *KeyListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *KeyListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter KeyListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter KeyListResultIterator) Response() KeyListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter KeyListResultIterator) Value() KeyItem { - if !iter.page.NotDone() { - return KeyItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the KeyListResultIterator type. -func NewKeyListResultIterator(page KeyListResultPage) KeyListResultIterator { - return KeyListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (klr KeyListResult) IsEmpty() bool { - return klr.Value == nil || len(*klr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (klr KeyListResult) hasNextLink() bool { - return klr.NextLink != nil && len(*klr.NextLink) != 0 -} - -// keyListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (klr KeyListResult) keyListResultPreparer(ctx context.Context) (*http.Request, error) { - if !klr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(klr.NextLink))) -} - -// KeyListResultPage contains a page of KeyItem values. -type KeyListResultPage struct { - fn func(context.Context, KeyListResult) (KeyListResult, error) - klr KeyListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *KeyListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/KeyListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.klr) - if err != nil { - return err - } - page.klr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *KeyListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page KeyListResultPage) NotDone() bool { - return !page.klr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page KeyListResultPage) Response() KeyListResult { - return page.klr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page KeyListResultPage) Values() []KeyItem { - if page.klr.IsEmpty() { - return nil - } - return *page.klr.Value -} - -// Creates a new instance of the KeyListResultPage type. -func NewKeyListResultPage(cur KeyListResult, getNextPage func(context.Context, KeyListResult) (KeyListResult, error)) KeyListResultPage { - return KeyListResultPage{ - fn: getNextPage, - klr: cur, - } -} - -// KeyOperationResult the key operation result. 
-type KeyOperationResult struct {
-	autorest.Response `json:"-"`
-	// Kid - READ-ONLY; Key identifier
-	Kid *string `json:"kid,omitempty"`
-	// Result - READ-ONLY; a URL-encoded base64 string
-	Result *string `json:"value,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for KeyOperationResult.
-func (kor KeyOperationResult) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// KeyOperationsParameters the key operations parameters.
-type KeyOperationsParameters struct {
-	// Algorithm - algorithm identifier. Possible values include: 'RSAOAEP', 'RSAOAEP256', 'RSA15'
-	Algorithm JSONWebKeyEncryptionAlgorithm `json:"alg,omitempty"`
-	// Value - a URL-encoded base64 string
-	Value *string `json:"value,omitempty"`
-}
-
-// KeyProperties properties of the key pair backing a certificate.
-type KeyProperties struct {
-	// Exportable - Indicates if the private key can be exported.
-	Exportable *bool `json:"exportable,omitempty"`
-	// KeyType - The type of key pair to be used for the certificate. Possible values include: 'EC', 'ECHSM', 'RSA', 'RSAHSM', 'Oct'
-	KeyType JSONWebKeyType `json:"kty,omitempty"`
-	// KeySize - The key size in bits. For example: 2048, 3072, or 4096 for RSA.
-	KeySize *int32 `json:"key_size,omitempty"`
-	// ReuseKey - Indicates if the same key pair will be used on certificate renewal.
-	ReuseKey *bool `json:"reuse_key,omitempty"`
-	// Curve - Elliptic curve name. For valid values, see JsonWebKeyCurveName. Possible values include: 'P256', 'P384', 'P521', 'P256K'
-	Curve JSONWebKeyCurveName `json:"crv,omitempty"`
-}
-
-// KeyRestoreParameters the key restore parameters.
-type KeyRestoreParameters struct {
-	// KeyBundleBackup - The backup blob associated with a key bundle. (a URL-encoded base64 string)
-	KeyBundleBackup *string `json:"value,omitempty"`
-}
-
-// KeySignParameters the key operations parameters.
-type KeySignParameters struct {
-	// Algorithm - The signing/verification algorithm identifier. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K'
-	Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"`
-	// Value - a URL-encoded base64 string
-	Value *string `json:"value,omitempty"`
-}
-
-// KeyUpdateParameters the key update parameters.
-type KeyUpdateParameters struct {
-	// KeyOps - Json web key operations. For more information on possible key operations, see JsonWebKeyOperation.
-	KeyOps *[]JSONWebKeyOperation `json:"key_ops,omitempty"`
-	KeyAttributes *KeyAttributes `json:"attributes,omitempty"`
-	// Tags - Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for KeyUpdateParameters.
-func (kup KeyUpdateParameters) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if kup.KeyOps != nil {
-		objectMap["key_ops"] = kup.KeyOps
-	}
-	if kup.KeyAttributes != nil {
-		objectMap["attributes"] = kup.KeyAttributes
-	}
-	if kup.Tags != nil {
-		objectMap["tags"] = kup.Tags
-	}
-	return json.Marshal(objectMap)
-}
-
-// KeyVerifyParameters the key verify parameters.
-type KeyVerifyParameters struct {
-	// Algorithm - The signing/verification algorithm. For more information on possible algorithm types, see JsonWebKeySignatureAlgorithm. Possible values include: 'PS256', 'PS384', 'PS512', 'RS256', 'RS384', 'RS512', 'RSNULL', 'ES256', 'ES384', 'ES512', 'ES256K'
-	Algorithm JSONWebKeySignatureAlgorithm `json:"alg,omitempty"`
-	// Digest - The digest used for signing. (a URL-encoded base64 string)
-	Digest *string `json:"digest,omitempty"`
-	// Signature - The signature to be verified. (a URL-encoded base64 string)
-	Signature *string `json:"value,omitempty"`
-}
-
-// KeyVerifyResult the key verify result.
-type KeyVerifyResult struct {
-	autorest.Response `json:"-"`
-	// Value - READ-ONLY; True if the signature is verified, otherwise false.
-	Value *bool `json:"value,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for KeyVerifyResult.
-func (kvr KeyVerifyResult) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// LifetimeAction action and its trigger that will be performed by Key Vault over the lifetime of a
-// certificate.
-type LifetimeAction struct {
-	// Trigger - The condition that will execute the action.
-	Trigger *Trigger `json:"trigger,omitempty"`
-	// Action - The action that will be executed.
-	Action *Action `json:"action,omitempty"`
-}
-
-// OrganizationDetails details of the organization of the certificate issuer.
-type OrganizationDetails struct {
-	// ID - Id of the organization.
-	ID *string `json:"id,omitempty"`
-	// AdminDetails - Details of the organization administrator.
-	AdminDetails *[]AdministratorDetails `json:"admin_details,omitempty"`
-}
-
-// PendingCertificateSigningRequestResult the pending certificate signing request result.
-type PendingCertificateSigningRequestResult struct {
-	// Value - READ-ONLY; The pending certificate signing request as Base64 encoded string.
-	Value *string `json:"value,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for PendingCertificateSigningRequestResult.
-func (pcsrr PendingCertificateSigningRequestResult) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// SasDefinitionAttributes the SAS definition management attributes.
-type SasDefinitionAttributes struct {
-	// Enabled - the enabled state of the object.
-	Enabled *bool `json:"enabled,omitempty"`
-	// Created - READ-ONLY; Creation time in UTC.
-	Created *date.UnixTime `json:"created,omitempty"`
-	// Updated - READ-ONLY; Last updated time in UTC.
-	Updated *date.UnixTime `json:"updated,omitempty"`
-	// RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for SAS definitions in the current vault. If it contains 'Purgeable' the SAS definition can be permanently deleted by a privileged user; otherwise, only the system can purge the SAS definition, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription'
-	RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SasDefinitionAttributes.
-func (sda SasDefinitionAttributes) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if sda.Enabled != nil {
-		objectMap["enabled"] = sda.Enabled
-	}
-	return json.Marshal(objectMap)
-}
-
-// SasDefinitionBundle a SAS definition bundle consists of key vault SAS definition details plus its
-// attributes.
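The sign/verify parameter types above take a locally computed digest, again as an unpadded URL-safe base64 string. A preparation sketch for an RS256 signature request; the helper name is illustrative and the algorithm is given as a literal conversion rather than a generated constant:

    import (
    	"crypto/sha256"
    	"encoding/base64"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // signParamsFor hashes a payload locally and packages the digest the way
    // KeySignParameters expects: unpadded, URL-safe base64.
    func signParamsFor(payload []byte) keyvault.KeySignParameters {
    	digest := sha256.Sum256(payload)
    	return keyvault.KeySignParameters{
    		Algorithm: keyvault.JSONWebKeySignatureAlgorithm("RS256"), // literal conversion; constant names not assumed
    		Value:     to.StringPtr(base64.RawURLEncoding.EncodeToString(digest[:])),
    	}
    }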
-type SasDefinitionBundle struct { - autorest.Response `json:"-"` - // ID - READ-ONLY; The SAS definition id. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; Storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // TemplateURI - READ-ONLY; The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - READ-ONLY; The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - READ-ONLY; The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // Attributes - READ-ONLY; The SAS definition attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionBundle. -func (sdb SasDefinitionBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionCreateParameters the SAS definition create parameters. -type SasDefinitionCreateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionCreateParameters. -func (sdcp SasDefinitionCreateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdcp.TemplateURI != nil { - objectMap["templateUri"] = sdcp.TemplateURI - } - if sdcp.SasType != "" { - objectMap["sasType"] = sdcp.SasType - } - if sdcp.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdcp.ValidityPeriod - } - if sdcp.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdcp.SasDefinitionAttributes - } - if sdcp.Tags != nil { - objectMap["tags"] = sdcp.Tags - } - return json.Marshal(objectMap) -} - -// SasDefinitionItem the SAS definition item containing storage SAS definition metadata. -type SasDefinitionItem struct { - // ID - READ-ONLY; The storage SAS identifier. - ID *string `json:"id,omitempty"` - // SecretID - READ-ONLY; The storage account SAS definition secret id. - SecretID *string `json:"sid,omitempty"` - // Attributes - READ-ONLY; The SAS definition management attributes. - Attributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - READ-ONLY; Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionItem. 
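The SAS-definition parameter types above express token validity as an ISO-8601 duration. A construction sketch; the template string is a placeholder the caller must supply, and the token type is a literal conversion rather than a generated constant:

    import (
    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // accountSasDefinition describes a definition that stamps out two-hour
    // account SAS tokens from a caller-supplied signed template.
    func accountSasDefinition(template string) keyvault.SasDefinitionCreateParameters {
    	return keyvault.SasDefinitionCreateParameters{
    		TemplateURI:    to.StringPtr(template),
    		SasType:        keyvault.SasTokenType("account"), // literal conversion; constant names not assumed
    		ValidityPeriod: to.StringPtr("PT2H"),             // ISO-8601 duration: two hours
    		SasDefinitionAttributes: &keyvault.SasDefinitionAttributes{
    			Enabled: to.BoolPtr(true),
    		},
    	}
    }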
-func (sdi SasDefinitionItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResult the storage account SAS definition list result. -type SasDefinitionListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of SAS definitions along with a link to the next page of SAS definitions. - Value *[]SasDefinitionItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of SAS definitions. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionListResult. -func (sdlr SasDefinitionListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SasDefinitionListResultIterator provides access to a complete listing of SasDefinitionItem values. -type SasDefinitionListResultIterator struct { - i int - page SasDefinitionListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SasDefinitionListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SasDefinitionListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter SasDefinitionListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SasDefinitionListResultIterator) Response() SasDefinitionListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SasDefinitionListResultIterator) Value() SasDefinitionItem { - if !iter.page.NotDone() { - return SasDefinitionItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SasDefinitionListResultIterator type. -func NewSasDefinitionListResultIterator(page SasDefinitionListResultPage) SasDefinitionListResultIterator { - return SasDefinitionListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (sdlr SasDefinitionListResult) IsEmpty() bool { - return sdlr.Value == nil || len(*sdlr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (sdlr SasDefinitionListResult) hasNextLink() bool { - return sdlr.NextLink != nil && len(*sdlr.NextLink) != 0 -} - -// sasDefinitionListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (sdlr SasDefinitionListResult) sasDefinitionListResultPreparer(ctx context.Context) (*http.Request, error) { - if !sdlr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(sdlr.NextLink))) -} - -// SasDefinitionListResultPage contains a page of SasDefinitionItem values. -type SasDefinitionListResultPage struct { - fn func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error) - sdlr SasDefinitionListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SasDefinitionListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SasDefinitionListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.sdlr) - if err != nil { - return err - } - page.sdlr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SasDefinitionListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SasDefinitionListResultPage) NotDone() bool { - return !page.sdlr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SasDefinitionListResultPage) Response() SasDefinitionListResult { - return page.sdlr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SasDefinitionListResultPage) Values() []SasDefinitionItem { - if page.sdlr.IsEmpty() { - return nil - } - return *page.sdlr.Value -} - -// Creates a new instance of the SasDefinitionListResultPage type. -func NewSasDefinitionListResultPage(cur SasDefinitionListResult, getNextPage func(context.Context, SasDefinitionListResult) (SasDefinitionListResult, error)) SasDefinitionListResultPage { - return SasDefinitionListResultPage{ - fn: getNextPage, - sdlr: cur, - } -} - -// SasDefinitionUpdateParameters the SAS definition update parameters. -type SasDefinitionUpdateParameters struct { - // TemplateURI - The SAS definition token template signed with an arbitrary key. Tokens created according to the SAS definition will have the same properties as the template. - TemplateURI *string `json:"templateUri,omitempty"` - // SasType - The type of SAS token the SAS definition will create. Possible values include: 'Account', 'Service' - SasType SasTokenType `json:"sasType,omitempty"` - // ValidityPeriod - The validity period of SAS tokens created according to the SAS definition. - ValidityPeriod *string `json:"validityPeriod,omitempty"` - // SasDefinitionAttributes - The attributes of the SAS definition. - SasDefinitionAttributes *SasDefinitionAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for SasDefinitionUpdateParameters. 
-func (sdup SasDefinitionUpdateParameters) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sdup.TemplateURI != nil { - objectMap["templateUri"] = sdup.TemplateURI - } - if sdup.SasType != "" { - objectMap["sasType"] = sdup.SasType - } - if sdup.ValidityPeriod != nil { - objectMap["validityPeriod"] = sdup.ValidityPeriod - } - if sdup.SasDefinitionAttributes != nil { - objectMap["attributes"] = sdup.SasDefinitionAttributes - } - if sdup.Tags != nil { - objectMap["tags"] = sdup.Tags - } - return json.Marshal(objectMap) -} - -// SecretAttributes the secret management attributes. -type SecretAttributes struct { - // RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for secrets in the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise, only the system can purge the secret, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription' - RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"` - // Enabled - Determines whether the object is enabled. - Enabled *bool `json:"enabled,omitempty"` - // NotBefore - Not before date in UTC. - NotBefore *date.UnixTime `json:"nbf,omitempty"` - // Expires - Expiry date in UTC. - Expires *date.UnixTime `json:"exp,omitempty"` - // Created - READ-ONLY; Creation time in UTC. - Created *date.UnixTime `json:"created,omitempty"` - // Updated - READ-ONLY; Last updated time in UTC. - Updated *date.UnixTime `json:"updated,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretAttributes. -func (sa SecretAttributes) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sa.Enabled != nil { - objectMap["enabled"] = sa.Enabled - } - if sa.NotBefore != nil { - objectMap["nbf"] = sa.NotBefore - } - if sa.Expires != nil { - objectMap["exp"] = sa.Expires - } - return json.Marshal(objectMap) -} - -// SecretBundle a secret consisting of a value, id and its attributes. -type SecretBundle struct { - autorest.Response `json:"-"` - // Value - The secret value. - Value *string `json:"value,omitempty"` - // ID - The secret id. - ID *string `json:"id,omitempty"` - // ContentType - The content type of the secret. - ContentType *string `json:"contentType,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // Kid - READ-ONLY; If this is a secret backing a KV certificate, then this field specifies the corresponding key backing the KV certificate. - Kid *string `json:"kid,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a secret backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretBundle. 
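SecretAttributes above uses go-autorest's date.UnixTime for the nbf/exp fields, which marshal as Unix epoch seconds. A sketch of attributes for a secret that becomes valid immediately and expires after a caller-chosen duration; since date.UnixTime is a defined type over time.Time, plain conversions suffice:

    import (
    	"time"

    	"github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault" // assumed vendored path
    	"github.com/Azure/go-autorest/autorest/date"
    	"github.com/Azure/go-autorest/autorest/to"
    )

    // attributesValidFor returns secret attributes that activate now and
    // expire after d; nbf and exp travel as epoch seconds on the wire.
    func attributesValidFor(d time.Duration) keyvault.SecretAttributes {
    	nbf := date.UnixTime(time.Now())
    	exp := date.UnixTime(time.Now().Add(d))
    	return keyvault.SecretAttributes{
    		Enabled:   to.BoolPtr(true),
    		NotBefore: &nbf,
    		Expires:   &exp,
    	}
    }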
-func (sb SecretBundle) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sb.Value != nil { - objectMap["value"] = sb.Value - } - if sb.ID != nil { - objectMap["id"] = sb.ID - } - if sb.ContentType != nil { - objectMap["contentType"] = sb.ContentType - } - if sb.Attributes != nil { - objectMap["attributes"] = sb.Attributes - } - if sb.Tags != nil { - objectMap["tags"] = sb.Tags - } - return json.Marshal(objectMap) -} - -// SecretItem the secret item containing secret metadata. -type SecretItem struct { - // ID - Secret identifier. - ID *string `json:"id,omitempty"` - // Attributes - The secret management attributes. - Attributes *SecretAttributes `json:"attributes,omitempty"` - // Tags - Application specific metadata in the form of key-value pairs. - Tags map[string]*string `json:"tags"` - // ContentType - Type of the secret value such as a password. - ContentType *string `json:"contentType,omitempty"` - // Managed - READ-ONLY; True if the secret's lifetime is managed by key vault. If this is a key backing a certificate, then managed will be true. - Managed *bool `json:"managed,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretItem. -func (si SecretItem) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if si.ID != nil { - objectMap["id"] = si.ID - } - if si.Attributes != nil { - objectMap["attributes"] = si.Attributes - } - if si.Tags != nil { - objectMap["tags"] = si.Tags - } - if si.ContentType != nil { - objectMap["contentType"] = si.ContentType - } - return json.Marshal(objectMap) -} - -// SecretListResult the secret list result. -type SecretListResult struct { - autorest.Response `json:"-"` - // Value - READ-ONLY; A response message containing a list of secrets in the key vault along with a link to the next page of secrets. - Value *[]SecretItem `json:"value,omitempty"` - // NextLink - READ-ONLY; The URL to get the next set of secrets. - NextLink *string `json:"nextLink,omitempty"` -} - -// MarshalJSON is the custom marshaler for SecretListResult. -func (slr SecretListResult) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// SecretListResultIterator provides access to a complete listing of SecretItem values. -type SecretListResultIterator struct { - i int - page SecretListResultPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *SecretListResultIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *SecretListResultIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. 
-func (iter SecretListResultIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter SecretListResultIterator) Response() SecretListResult { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter SecretListResultIterator) Value() SecretItem { - if !iter.page.NotDone() { - return SecretItem{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the SecretListResultIterator type. -func NewSecretListResultIterator(page SecretListResultPage) SecretListResultIterator { - return SecretListResultIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (slr SecretListResult) IsEmpty() bool { - return slr.Value == nil || len(*slr.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (slr SecretListResult) hasNextLink() bool { - return slr.NextLink != nil && len(*slr.NextLink) != 0 -} - -// secretListResultPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. -func (slr SecretListResult) secretListResultPreparer(ctx context.Context) (*http.Request, error) { - if !slr.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(slr.NextLink))) -} - -// SecretListResultPage contains a page of SecretItem values. -type SecretListResultPage struct { - fn func(context.Context, SecretListResult) (SecretListResult, error) - slr SecretListResult -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *SecretListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/SecretListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *SecretListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page SecretListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page SecretListResultPage) Response() SecretListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page SecretListResultPage) Values() []SecretItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the SecretListResultPage type. 
-func NewSecretListResultPage(cur SecretListResult, getNextPage func(context.Context, SecretListResult) (SecretListResult, error)) SecretListResultPage {
-	return SecretListResultPage{
-		fn:  getNextPage,
-		slr: cur,
-	}
-}
-
-// SecretProperties properties of the key backing a certificate.
-type SecretProperties struct {
-	// ContentType - The media type (MIME type).
-	ContentType *string `json:"contentType,omitempty"`
-}
-
-// SecretRestoreParameters the secret restore parameters.
-type SecretRestoreParameters struct {
-	// SecretBundleBackup - The backup blob associated with a secret bundle. (a URL-encoded base64 string)
-	SecretBundleBackup *string `json:"value,omitempty"`
-}
-
-// SecretSetParameters the secret set parameters.
-type SecretSetParameters struct {
-	// Value - The value of the secret.
-	Value *string `json:"value,omitempty"`
-	// Tags - Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-	// ContentType - Type of the secret value such as a password.
-	ContentType *string `json:"contentType,omitempty"`
-	// SecretAttributes - The secret management attributes.
-	SecretAttributes *SecretAttributes `json:"attributes,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for SecretSetParameters.
-func (ssp SecretSetParameters) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if ssp.Value != nil {
-		objectMap["value"] = ssp.Value
-	}
-	if ssp.Tags != nil {
-		objectMap["tags"] = ssp.Tags
-	}
-	if ssp.ContentType != nil {
-		objectMap["contentType"] = ssp.ContentType
-	}
-	if ssp.SecretAttributes != nil {
-		objectMap["attributes"] = ssp.SecretAttributes
-	}
-	return json.Marshal(objectMap)
-}
-
-// SecretUpdateParameters the secret update parameters.
-type SecretUpdateParameters struct {
-	// ContentType - Type of the secret value such as a password.
-	ContentType *string `json:"contentType,omitempty"`
-	// SecretAttributes - The secret management attributes.
-	SecretAttributes *SecretAttributes `json:"attributes,omitempty"`
-	// Tags - Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for SecretUpdateParameters.
-func (sup SecretUpdateParameters) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if sup.ContentType != nil {
-		objectMap["contentType"] = sup.ContentType
-	}
-	if sup.SecretAttributes != nil {
-		objectMap["attributes"] = sup.SecretAttributes
-	}
-	if sup.Tags != nil {
-		objectMap["tags"] = sup.Tags
-	}
-	return json.Marshal(objectMap)
-}
-
-// StorageAccountAttributes the storage account management attributes.
-type StorageAccountAttributes struct {
-	// Enabled - the enabled state of the object.
-	Enabled *bool `json:"enabled,omitempty"`
-	// Created - READ-ONLY; Creation time in UTC.
-	Created *date.UnixTime `json:"created,omitempty"`
-	// Updated - READ-ONLY; Last updated time in UTC.
-	Updated *date.UnixTime `json:"updated,omitempty"`
-	// RecoveryLevel - READ-ONLY; Reflects the deletion recovery level currently in effect for storage accounts in the current vault. If it contains 'Purgeable' the storage account can be permanently deleted by a privileged user; otherwise, only the system can purge the storage account, at the end of the retention interval. Possible values include: 'Purgeable', 'RecoverablePurgeable', 'Recoverable', 'RecoverableProtectedSubscription', 'CustomizedRecoverablePurgeable', 'CustomizedRecoverable', 'CustomizedRecoverableProtectedSubscription'
-	RecoveryLevel DeletionRecoveryLevel `json:"recoveryLevel,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for StorageAccountAttributes.
-func (saa StorageAccountAttributes) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if saa.Enabled != nil {
-		objectMap["enabled"] = saa.Enabled
-	}
-	return json.Marshal(objectMap)
-}
-
-// StorageAccountCreateParameters the storage account create parameters.
-type StorageAccountCreateParameters struct {
-	// ResourceID - Storage account resource id.
-	ResourceID *string `json:"resourceId,omitempty"`
-	// ActiveKeyName - Current active storage account key name.
-	ActiveKeyName *string `json:"activeKeyName,omitempty"`
-	// AutoRegenerateKey - whether keyvault should manage the storage account for the user.
-	AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"`
-	// RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format.
-	RegenerationPeriod *string `json:"regenerationPeriod,omitempty"`
-	// StorageAccountAttributes - The attributes of the storage account.
-	StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"`
-	// Tags - Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for StorageAccountCreateParameters.
-func (sacp StorageAccountCreateParameters) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if sacp.ResourceID != nil {
-		objectMap["resourceId"] = sacp.ResourceID
-	}
-	if sacp.ActiveKeyName != nil {
-		objectMap["activeKeyName"] = sacp.ActiveKeyName
-	}
-	if sacp.AutoRegenerateKey != nil {
-		objectMap["autoRegenerateKey"] = sacp.AutoRegenerateKey
-	}
-	if sacp.RegenerationPeriod != nil {
-		objectMap["regenerationPeriod"] = sacp.RegenerationPeriod
-	}
-	if sacp.StorageAccountAttributes != nil {
-		objectMap["attributes"] = sacp.StorageAccountAttributes
-	}
-	if sacp.Tags != nil {
-		objectMap["tags"] = sacp.Tags
-	}
-	return json.Marshal(objectMap)
-}
-
-// StorageAccountItem the storage account item containing storage account metadata.
-type StorageAccountItem struct {
-	// ID - READ-ONLY; Storage identifier.
-	ID *string `json:"id,omitempty"`
-	// ResourceID - READ-ONLY; Storage account resource Id.
-	ResourceID *string `json:"resourceId,omitempty"`
-	// Attributes - READ-ONLY; The storage account management attributes.
-	Attributes *StorageAccountAttributes `json:"attributes,omitempty"`
-	// Tags - READ-ONLY; Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for StorageAccountItem.
-func (sai StorageAccountItem) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// StorageAccountRegenerteKeyParameters the storage account key regenerate parameters.
-type StorageAccountRegenerteKeyParameters struct {
-	// KeyName - The storage account key name.
-	KeyName *string `json:"keyName,omitempty"`
-}
-
-// StorageAccountUpdateParameters the storage account update parameters.
-type StorageAccountUpdateParameters struct {
-	// ActiveKeyName - The current active storage account key name.
-	ActiveKeyName *string `json:"activeKeyName,omitempty"`
-	// AutoRegenerateKey - whether keyvault should manage the storage account for the user.
-	AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"`
-	// RegenerationPeriod - The key regeneration time duration specified in ISO-8601 format.
-	RegenerationPeriod *string `json:"regenerationPeriod,omitempty"`
-	// StorageAccountAttributes - The attributes of the storage account.
-	StorageAccountAttributes *StorageAccountAttributes `json:"attributes,omitempty"`
-	// Tags - Application specific metadata in the form of key-value pairs.
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for StorageAccountUpdateParameters.
-func (saup StorageAccountUpdateParameters) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	if saup.ActiveKeyName != nil {
-		objectMap["activeKeyName"] = saup.ActiveKeyName
-	}
-	if saup.AutoRegenerateKey != nil {
-		objectMap["autoRegenerateKey"] = saup.AutoRegenerateKey
-	}
-	if saup.RegenerationPeriod != nil {
-		objectMap["regenerationPeriod"] = saup.RegenerationPeriod
-	}
-	if saup.StorageAccountAttributes != nil {
-		objectMap["attributes"] = saup.StorageAccountAttributes
-	}
-	if saup.Tags != nil {
-		objectMap["tags"] = saup.Tags
-	}
-	return json.Marshal(objectMap)
-}
-
-// StorageBundle a Storage account bundle consists of key vault storage account details plus its
-// attributes.
-type StorageBundle struct {
-	autorest.Response `json:"-"`
-	// ID - READ-ONLY; The storage account id.
-	ID *string `json:"id,omitempty"`
-	// ResourceID - READ-ONLY; The storage account resource id.
-	ResourceID *string `json:"resourceId,omitempty"`
-	// ActiveKeyName - READ-ONLY; The current active storage account key name.
-	ActiveKeyName *string `json:"activeKeyName,omitempty"`
-	// AutoRegenerateKey - READ-ONLY; whether keyvault should manage the storage account for the user.
-	AutoRegenerateKey *bool `json:"autoRegenerateKey,omitempty"`
-	// RegenerationPeriod - READ-ONLY; The key regeneration time duration specified in ISO-8601 format.
-	RegenerationPeriod *string `json:"regenerationPeriod,omitempty"`
-	// Attributes - READ-ONLY; The storage account attributes.
-	Attributes *StorageAccountAttributes `json:"attributes,omitempty"`
-	// Tags - READ-ONLY; Application specific metadata in the form of key-value pairs
-	Tags map[string]*string `json:"tags"`
-}
-
-// MarshalJSON is the custom marshaler for StorageBundle.
-func (sb StorageBundle) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// StorageListResult the storage accounts list result.
-type StorageListResult struct {
-	autorest.Response `json:"-"`
-	// Value - READ-ONLY; A response message containing a list of storage accounts in the key vault along with a link to the next page of storage accounts.
-	Value *[]StorageAccountItem `json:"value,omitempty"`
-	// NextLink - READ-ONLY; The URL to get the next set of storage accounts.
-	NextLink *string `json:"nextLink,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for StorageListResult.
-func (slr StorageListResult) MarshalJSON() ([]byte, error) {
-	objectMap := make(map[string]interface{})
-	return json.Marshal(objectMap)
-}
-
-// StorageListResultIterator provides access to a complete listing of StorageAccountItem values.
-type StorageListResultIterator struct {
-	i    int
-	page StorageListResultPage
-}
-
-// NextWithContext advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-func (iter *StorageListResultIterator) NextWithContext(ctx context.Context) (err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultIterator.NextWithContext")
-		defer func() {
-			sc := -1
-			if iter.Response().Response.Response != nil {
-				sc = iter.Response().Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	iter.i++
-	if iter.i < len(iter.page.Values()) {
-		return nil
-	}
-	err = iter.page.NextWithContext(ctx)
-	if err != nil {
-		iter.i--
-		return err
-	}
-	iter.i = 0
-	return nil
-}
-
-// Next advances to the next value. If there was an error making
-// the request the iterator does not advance and the error is returned.
-// Deprecated: Use NextWithContext() instead.
-func (iter *StorageListResultIterator) Next() error {
-	return iter.NextWithContext(context.Background())
-}
-
-// NotDone returns true if the enumeration should be started or is not yet complete.
-func (iter StorageListResultIterator) NotDone() bool {
-	return iter.page.NotDone() && iter.i < len(iter.page.Values())
-}
-
-// Response returns the raw server response from the last page request.
-func (iter StorageListResultIterator) Response() StorageListResult {
-	return iter.page.Response()
-}
-
-// Value returns the current value or a zero-initialized value if the
-// iterator has advanced beyond the end of the collection.
-func (iter StorageListResultIterator) Value() StorageAccountItem {
-	if !iter.page.NotDone() {
-		return StorageAccountItem{}
-	}
-	return iter.page.Values()[iter.i]
-}
-
-// Creates a new instance of the StorageListResultIterator type.
-func NewStorageListResultIterator(page StorageListResultPage) StorageListResultIterator {
-	return StorageListResultIterator{page: page}
-}
-
-// IsEmpty returns true if the ListResult contains no values.
-func (slr StorageListResult) IsEmpty() bool {
-	return slr.Value == nil || len(*slr.Value) == 0
-}
-
-// hasNextLink returns true if the NextLink is not empty.
-func (slr StorageListResult) hasNextLink() bool {
-	return slr.NextLink != nil && len(*slr.NextLink) != 0
-}
-
-// storageListResultPreparer prepares a request to retrieve the next set of results.
-// It returns nil if no more results exist.
-func (slr StorageListResult) storageListResultPreparer(ctx context.Context) (*http.Request, error) {
-	if !slr.hasNextLink() {
-		return nil, nil
-	}
-	return autorest.Prepare((&http.Request{}).WithContext(ctx),
-		autorest.AsJSON(),
-		autorest.AsGet(),
-		autorest.WithBaseURL(to.String(slr.NextLink)))
-}
-
-// StorageListResultPage contains a page of StorageAccountItem values.
-type StorageListResultPage struct {
-	fn  func(context.Context, StorageListResult) (StorageListResult, error)
-	slr StorageListResult
-}
-
-// NextWithContext advances to the next page of values. If there was an error making
-// the request the page does not advance and the error is returned.
-func (page *StorageListResultPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/StorageListResultPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.slr) - if err != nil { - return err - } - page.slr = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *StorageListResultPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page StorageListResultPage) NotDone() bool { - return !page.slr.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page StorageListResultPage) Response() StorageListResult { - return page.slr -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page StorageListResultPage) Values() []StorageAccountItem { - if page.slr.IsEmpty() { - return nil - } - return *page.slr.Value -} - -// Creates a new instance of the StorageListResultPage type. -func NewStorageListResultPage(cur StorageListResult, getNextPage func(context.Context, StorageListResult) (StorageListResult, error)) StorageListResultPage { - return StorageListResultPage{ - fn: getNextPage, - slr: cur, - } -} - -// StorageRestoreParameters the secret restore parameters. -type StorageRestoreParameters struct { - // StorageBundleBackup - The backup blob associated with a storage account. (a URL-encoded base64 string) - StorageBundleBackup *string `json:"value,omitempty"` -} - -// SubjectAlternativeNames the subject alternate names of a X509 object. -type SubjectAlternativeNames struct { - // Emails - Email addresses. - Emails *[]string `json:"emails,omitempty"` - // DNSNames - Domain names. - DNSNames *[]string `json:"dns_names,omitempty"` - // Upns - User principal names. - Upns *[]string `json:"upns,omitempty"` -} - -// Trigger a condition to be satisfied for an action to be executed. -type Trigger struct { - // LifetimePercentage - Percentage of lifetime at which to trigger. Value should be between 1 and 99. - LifetimePercentage *int32 `json:"lifetime_percentage,omitempty"` - // DaysBeforeExpiry - Days before expiry to attempt renewal. Value should be between 1 and validity_in_months multiplied by 27. If validity_in_months is 36, then value should be between 1 and 972 (36 * 27). - DaysBeforeExpiry *int32 `json:"days_before_expiry,omitempty"` -} - -// X509CertificateProperties properties of the X509 component of a certificate. -type X509CertificateProperties struct { - // Subject - The subject name. Should be a valid X509 distinguished Name. - Subject *string `json:"subject,omitempty"` - // Ekus - The enhanced key usage. - Ekus *[]string `json:"ekus,omitempty"` - // SubjectAlternativeNames - The subject alternative names. - SubjectAlternativeNames *SubjectAlternativeNames `json:"sans,omitempty"` - // KeyUsage - List of key usages. - KeyUsage *[]KeyUsageType `json:"key_usage,omitempty"` - // ValidityInMonths - The duration that the certificate is valid in months. 
- ValidityInMonths *int32 `json:"validity_months,omitempty"` -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go deleted file mode 100644 index c459083898b..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package keyvault - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " keyvault/7.0" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md deleted file mode 100644 index 52911e4cc5e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/CHANGELOG.md +++ /dev/null @@ -1,2 +0,0 @@ -# Change History - diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json deleted file mode 100644 index b55c341f884..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/_meta.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "commit": "3c764635e7d442b3e74caf593029fcd440b3ef82", - "readme": "/_/azure-rest-api-specs/specification/monitor/resource-manager/readme.md", - "tag": "package-2018-03", - "use": "@microsoft.azure/autorest.go@2.1.187", - "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2018-03 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/monitor/resource-manager/readme.md", - "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" - } -} \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go deleted file mode 100644 index 43753cb5b29..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go +++ /dev/null @@ -1,581 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
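The generated MarshalJSON methods removed above (StorageAccountAttributes, StorageAccountItem, StorageBundle, StorageListResult) all apply one track-1 SDK convention: only writable, non-nil fields are copied into the object map, so READ-ONLY, server-populated values are never echoed back in request bodies. A minimal self-contained sketch of that convention, assuming a hypothetical Widget type that is not part of the deleted SDK:

package main

import (
	"encoding/json"
	"fmt"
)

// Widget is a hypothetical resource: Name is writable, Status is READ-ONLY.
type Widget struct {
	Name   *string `json:"name,omitempty"`
	Status *string `json:"status,omitempty"` // READ-ONLY; server-populated
}

// MarshalJSON mirrors the generated marshalers above: only writable,
// non-nil fields are copied into the object map, so READ-ONLY values
// are silently omitted from create/update payloads.
func (w Widget) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if w.Name != nil {
		objectMap["name"] = w.Name
	}
	return json.Marshal(objectMap)
}

func main() {
	name, status := "example", "Succeeded"
	b, _ := json.Marshal(Widget{Name: &name, Status: &status})
	fmt.Println(string(b)) // prints {"name":"example"}; Status is dropped
}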
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ActionGroupsClient is the monitor Management Client -type ActionGroupsClient struct { - BaseClient -} - -// NewActionGroupsClient creates an instance of the ActionGroupsClient client. -func NewActionGroupsClient(subscriptionID string) ActionGroupsClient { - return NewActionGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewActionGroupsClientWithBaseURI creates an instance of the ActionGroupsClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewActionGroupsClientWithBaseURI(baseURI string, subscriptionID string) ActionGroupsClient { - return ActionGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create a new action group or update an existing one. -// Parameters: -// resourceGroupName - the name of the resource group. -// actionGroupName - the name of the action group. -// actionGroup - the action group to create or use for the update. -func (client ActionGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroup ActionGroupResource) (result ActionGroupResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: actionGroup, - Constraints: []validation.Constraint{{Target: "actionGroup.ActionGroup", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "actionGroup.ActionGroup.GroupShortName", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "actionGroup.ActionGroup.GroupShortName", Name: validation.MaxLength, Rule: 12, Chain: nil}}}, - {Target: "actionGroup.ActionGroup.Enabled", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("insights.ActionGroupsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, actionGroupName, actionGroup) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ActionGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroup ActionGroupResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionGroupName": autorest.Encode("path", actionGroupName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), - autorest.WithJSON(actionGroup), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ActionGroupResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete an action group. -// Parameters: -// resourceGroupName - the name of the resource group. -// actionGroupName - the name of the action group. -func (client ActionGroupsClient) Delete(ctx context.Context, resourceGroupName string, actionGroupName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, actionGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client ActionGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, actionGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionGroupName": autorest.Encode("path", actionGroupName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// EnableReceiver enable a receiver in an action group. This changes the receiver's status from Disabled to Enabled. -// This operation is only supported for Email or SMS receivers. -// Parameters: -// resourceGroupName - the name of the resource group. -// actionGroupName - the name of the action group. -// enableRequest - the receiver to re-enable. -func (client ActionGroupsClient) EnableReceiver(ctx context.Context, resourceGroupName string, actionGroupName string, enableRequest EnableRequest) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.EnableReceiver") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: enableRequest, - Constraints: []validation.Constraint{{Target: "enableRequest.ReceiverName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("insights.ActionGroupsClient", "EnableReceiver", err.Error()) - } - - req, err := client.EnableReceiverPreparer(ctx, resourceGroupName, actionGroupName, enableRequest) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", nil, "Failure preparing request") - return - } - - resp, err := client.EnableReceiverSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", resp, "Failure sending request") - return - } - - result, err = client.EnableReceiverResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", resp, "Failure responding to request") - return - } - - return -} - -// EnableReceiverPreparer prepares the EnableReceiver request. 
-func (client ActionGroupsClient) EnableReceiverPreparer(ctx context.Context, resourceGroupName string, actionGroupName string, enableRequest EnableRequest) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionGroupName": autorest.Encode("path", actionGroupName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe", pathParameters), - autorest.WithJSON(enableRequest), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// EnableReceiverSender sends the EnableReceiver request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) EnableReceiverSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// EnableReceiverResponder handles the response to the EnableReceiver request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) EnableReceiverResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusConflict), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get an action group. -// Parameters: -// resourceGroupName - the name of the resource group. -// actionGroupName - the name of the action group. -func (client ActionGroupsClient) Get(ctx context.Context, resourceGroupName string, actionGroupName string) (result ActionGroupResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, actionGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ActionGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, actionGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionGroupName": autorest.Encode("path", actionGroupName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) GetResponder(resp *http.Response) (result ActionGroupResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup get a list of all action groups in a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -func (client ActionGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActionGroupList, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client ActionGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result ActionGroupList, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscriptionID get a list of all action groups in a subscription. -func (client ActionGroupsClient) ListBySubscriptionID(ctx context.Context) (result ActionGroupList, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListBySubscriptionID") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListBySubscriptionIDPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionIDSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", resp, "Failure sending request") - return - } - - result, err = client.ListBySubscriptionIDResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", resp, "Failure responding to request") - return - } - - return -} - -// ListBySubscriptionIDPreparer prepares the ListBySubscriptionID request. 
-func (client ActionGroupsClient) ListBySubscriptionIDPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionIDSender sends the ListBySubscriptionID request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) ListBySubscriptionIDSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionIDResponder handles the response to the ListBySubscriptionID request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) ListBySubscriptionIDResponder(resp *http.Response) (result ActionGroupList, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates an existing action group's tags. To update other fields use the CreateOrUpdate method. -// Parameters: -// resourceGroupName - the name of the resource group. -// actionGroupName - the name of the action group. -// actionGroupPatch - parameters supplied to the operation. -func (client ActionGroupsClient) Update(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroupPatch ActionGroupPatchBody) (result ActionGroupResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, actionGroupName, actionGroupPatch) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client ActionGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroupPatch ActionGroupPatchBody) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "actionGroupName": autorest.Encode("path", actionGroupName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), - autorest.WithJSON(actionGroupPatch), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client ActionGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client ActionGroupsClient) UpdateResponder(resp *http.Response) (result ActionGroupResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go deleted file mode 100644 index 44aaf3ef1e4..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go +++ /dev/null @@ -1,498 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ActivityLogAlertsClient is the monitor Management Client -type ActivityLogAlertsClient struct { - BaseClient -} - -// NewActivityLogAlertsClient creates an instance of the ActivityLogAlertsClient client. -func NewActivityLogAlertsClient(subscriptionID string) ActivityLogAlertsClient { - return NewActivityLogAlertsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewActivityLogAlertsClientWithBaseURI creates an instance of the ActivityLogAlertsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). 
-func NewActivityLogAlertsClientWithBaseURI(baseURI string, subscriptionID string) ActivityLogAlertsClient { - return ActivityLogAlertsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create a new activity log alert or update an existing one. -// Parameters: -// resourceGroupName - the name of the resource group. -// activityLogAlertName - the name of the activity log alert. -// activityLogAlert - the activity log alert to create or use for the update. -func (client ActivityLogAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlert ActivityLogAlertResource) (result ActivityLogAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: activityLogAlert, - Constraints: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert.Scopes", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "activityLogAlert.ActivityLogAlert.Condition", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert.Condition.AllOf", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "activityLogAlert.ActivityLogAlert.Actions", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("insights.ActivityLogAlertsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, activityLogAlertName, activityLogAlert) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ActivityLogAlertsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlert ActivityLogAlertResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "activityLogAlertName": autorest.Encode("path", activityLogAlertName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), - autorest.WithJSON(activityLogAlert), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) CreateOrUpdateResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete an activity log alert. -// Parameters: -// resourceGroupName - the name of the resource group. -// activityLogAlertName - the name of the activity log alert. -func (client ActivityLogAlertsClient) Delete(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, activityLogAlertName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client ActivityLogAlertsClient) DeletePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "activityLogAlertName": autorest.Encode("path", activityLogAlertName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get get an activity log alert. -// Parameters: -// resourceGroupName - the name of the resource group. -// activityLogAlertName - the name of the activity log alert. -func (client ActivityLogAlertsClient) Get(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result ActivityLogAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, activityLogAlertName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ActivityLogAlertsClient) GetPreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "activityLogAlertName": autorest.Encode("path", activityLogAlertName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) GetResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup get a list of all activity log alerts in a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -func (client ActivityLogAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActivityLogAlertList, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client ActivityLogAlertsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) ListByResourceGroupResponder(resp *http.Response) (result ActivityLogAlertList, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscriptionID get a list of all activity log alerts in a subscription. -func (client ActivityLogAlertsClient) ListBySubscriptionID(ctx context.Context) (result ActivityLogAlertList, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListBySubscriptionID") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListBySubscriptionIDPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionIDSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", resp, "Failure sending request") - return - } - - result, err = client.ListBySubscriptionIDResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", resp, "Failure responding to request") - return - } - - return -} - -// ListBySubscriptionIDPreparer prepares the ListBySubscriptionID request. 
-func (client ActivityLogAlertsClient) ListBySubscriptionIDPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/activityLogAlerts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionIDSender sends the ListBySubscriptionID request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) ListBySubscriptionIDSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionIDResponder handles the response to the ListBySubscriptionID request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) ListBySubscriptionIDResponder(resp *http.Response) (result ActivityLogAlertList, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates an existing ActivityLogAlertResource's tags. To update other fields use the CreateOrUpdate method. -// Parameters: -// resourceGroupName - the name of the resource group. -// activityLogAlertName - the name of the activity log alert. -// activityLogAlertPatch - parameters supplied to the operation. -func (client ActivityLogAlertsClient) Update(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlertPatch ActivityLogAlertPatchBody) (result ActivityLogAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, activityLogAlertName, activityLogAlertPatch) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client ActivityLogAlertsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlertPatch ActivityLogAlertPatchBody) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "activityLogAlertName": autorest.Encode("path", activityLogAlertName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2017-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), - autorest.WithJSON(activityLogAlertPatch), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogAlertsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client ActivityLogAlertsClient) UpdateResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go deleted file mode 100644 index 16dd215e6ad..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go +++ /dev/null @@ -1,166 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ActivityLogsClient is the monitor Management Client -type ActivityLogsClient struct { - BaseClient -} - -// NewActivityLogsClient creates an instance of the ActivityLogsClient client. -func NewActivityLogsClient(subscriptionID string) ActivityLogsClient { - return NewActivityLogsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewActivityLogsClientWithBaseURI creates an instance of the ActivityLogsClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewActivityLogsClientWithBaseURI(baseURI string, subscriptionID string) ActivityLogsClient {
-	return ActivityLogsClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List provides the list of records from the activity logs.
-// Parameters:
-// filter - reduces the set of data collected.<br>This argument is required and it also requires at least the
-// start date/time.<br>The **$filter** argument is very restricted and allows only the following patterns.<br>-
-// *List events for a resource group*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and
-// eventTimestamp le '2014-07-20T04:36:37.6407898Z' and resourceGroupName eq 'resourceGroupName'.<br>- *List
-// events for resource*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and eventTimestamp le
-// '2014-07-20T04:36:37.6407898Z' and resourceUri eq 'resourceURI'.<br>- *List events for a subscription in a
-// time range*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and eventTimestamp le
-// '2014-07-20T04:36:37.6407898Z'.<br>- *List events for a resource provider*: $filter=eventTimestamp ge
-// '2014-07-16T04:36:37.6407898Z' and eventTimestamp le '2014-07-20T04:36:37.6407898Z' and resourceProvider eq
-// 'resourceProviderName'.<br>- *List events for a correlation Id*: $filter=eventTimestamp ge
-// '2014-07-16T04:36:37.6407898Z' and eventTimestamp le '2014-07-20T04:36:37.6407898Z' and correlationId eq
-// 'correlationID'.<br><br>**NOTE**: No other syntax is allowed.
-// selectParameter - used to fetch events with only the given properties.<br>
The **$select** argument is a -// comma separated list of property names to be returned. Possible values are: *authorization*, *claims*, -// *correlationId*, *description*, *eventDataId*, *eventName*, *eventTimestamp*, *httpRequest*, *level*, -// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*, -// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId* -func (client ActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List") - defer func() { - sc := -1 - if result.edc.Response.Response != nil { - sc = result.edc.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, filter, selectParameter) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.edc.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", resp, "Failure sending request") - return - } - - result.edc, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", resp, "Failure responding to request") - return - } - if result.edc.hasNextLink() && result.edc.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client ActivityLogsClient) ListPreparer(ctx context.Context, filter string, selectParameter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "$filter": autorest.Encode("query", filter), - "api-version": APIVersion, - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/eventtypes/management/values", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client ActivityLogsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client ActivityLogsClient) ListResponder(resp *http.Response) (result EventDataCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
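As an illustrative aside (not part of the deleted file; subscription ID, time range, and selected properties are placeholders), the $filter/$select contract documented above was typically consumed like this against the deprecated track-1 package:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder credentials; NewAuthorizerFromEnvironment reads the usual
	// AZURE_* environment variables.
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		panic(err)
	}
	client := insights.NewActivityLogsClient("00000000-0000-0000-0000-000000000000")
	client.Authorizer = authorizer

	// $filter accepts only the patterns enumerated in the comment above;
	// this one selects a resource group within a time range.
	filter := "eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and " +
		"eventTimestamp le '2014-07-20T04:36:37.6407898Z' and " +
		"resourceGroupName eq 'myResourceGroup'"

	// List returns an EventDataCollectionPage; walk it page by page.
	page, err := client.List(context.Background(), filter, "eventName,status")
	if err != nil {
		panic(err)
	}
	for page.NotDone() {
		fmt.Printf("page with %d events\n", len(page.Values()))
		if err := page.NextWithContext(context.Background()); err != nil {
			panic(err)
		}
	}
}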
-func (client ActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) { - req, err := lastResults.eventDataCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client ActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, filter, selectParameter) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go deleted file mode 100644 index c3bf26828d8..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go +++ /dev/null @@ -1,186 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AlertRuleIncidentsClient is the monitor Management Client -type AlertRuleIncidentsClient struct { - BaseClient -} - -// NewAlertRuleIncidentsClient creates an instance of the AlertRuleIncidentsClient client. -func NewAlertRuleIncidentsClient(subscriptionID string) AlertRuleIncidentsClient { - return NewAlertRuleIncidentsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAlertRuleIncidentsClientWithBaseURI creates an instance of the AlertRuleIncidentsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewAlertRuleIncidentsClientWithBaseURI(baseURI string, subscriptionID string) AlertRuleIncidentsClient { - return AlertRuleIncidentsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets an incident associated to an alert rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// incidentName - the name of the incident to retrieve. 
-func (client AlertRuleIncidentsClient) Get(ctx context.Context, resourceGroupName string, ruleName string, incidentName string) (result Incident, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, ruleName, incidentName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client AlertRuleIncidentsClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string, incidentName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "incidentName": autorest.Encode("path", incidentName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents/{incidentName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRuleIncidentsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AlertRuleIncidentsClient) GetResponder(resp *http.Response) (result Incident, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByAlertRule gets a list of incidents associated to an alert rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. 
-func (client AlertRuleIncidentsClient) ListByAlertRule(ctx context.Context, resourceGroupName string, ruleName string) (result IncidentListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.ListByAlertRule") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByAlertRulePreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", nil, "Failure preparing request") - return - } - - resp, err := client.ListByAlertRuleSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", resp, "Failure sending request") - return - } - - result, err = client.ListByAlertRuleResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", resp, "Failure responding to request") - return - } - - return -} - -// ListByAlertRulePreparer prepares the ListByAlertRule request. -func (client AlertRuleIncidentsClient) ListByAlertRulePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByAlertRuleSender sends the ListByAlertRule request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRuleIncidentsClient) ListByAlertRuleSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByAlertRuleResponder handles the response to the ListByAlertRule request. The method always -// closes the http.Response Body. -func (client AlertRuleIncidentsClient) ListByAlertRuleResponder(resp *http.Response) (result IncidentListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go deleted file mode 100644 index f5a2ab68238..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go +++ /dev/null @@ -1,495 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
-// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AlertRulesClient is the monitor Management Client -type AlertRulesClient struct { - BaseClient -} - -// NewAlertRulesClient creates an instance of the AlertRulesClient client. -func NewAlertRulesClient(subscriptionID string) AlertRulesClient { - return NewAlertRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAlertRulesClientWithBaseURI creates an instance of the AlertRulesClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewAlertRulesClientWithBaseURI(baseURI string, subscriptionID string) AlertRulesClient { - return AlertRulesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates a classic metric alert rule. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// parameters - the parameters of the rule to create or update. -func (client AlertRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters AlertRuleResource) (result AlertRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AlertRule", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.AlertRule.Name", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.AlertRule.IsEnabled", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("insights.AlertRulesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client AlertRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters AlertRuleResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) CreateOrUpdateResponder(resp *http.Response) (result AlertRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a classic metric alert rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client AlertRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client AlertRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets a classic metric alert rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client AlertRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result AlertRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client AlertRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) GetResponder(resp *http.Response) (result AlertRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup list the classic metric alert rules within a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -func (client AlertRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AlertRuleResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client AlertRulesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) ListByResourceGroupResponder(resp *http.Response) (result AlertRuleResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscription list the classic metric alert rules within a subscription. -func (client AlertRulesClient) ListBySubscription(ctx context.Context) (result AlertRuleResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListBySubscription") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListBySubscriptionPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", resp, "Failure sending request") - return - } - - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", resp, "Failure responding to request") - return - } - - return -} - -// ListBySubscriptionPreparer prepares the ListBySubscription request. 
-func (client AlertRulesClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/alertrules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionSender sends the ListBySubscription request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) ListBySubscriptionResponder(resp *http.Response) (result AlertRuleResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates an existing classic metric AlertRuleResource. To update other fields use the CreateOrUpdate method. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// alertRulesResource - parameters supplied to the operation. -func (client AlertRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, alertRulesResource AlertRuleResourcePatch) (result AlertRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, alertRulesResource) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client AlertRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, alertRulesResource AlertRuleResourcePatch) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), - autorest.WithJSON(alertRulesResource), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client AlertRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client AlertRulesClient) UpdateResponder(resp *http.Response) (result AlertRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go deleted file mode 100644 index 86f253d0fb8..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go +++ /dev/null @@ -1,580 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// AutoscaleSettingsClient is the monitor Management Client -type AutoscaleSettingsClient struct { - BaseClient -} - -// NewAutoscaleSettingsClient creates an instance of the AutoscaleSettingsClient client. -func NewAutoscaleSettingsClient(subscriptionID string) AutoscaleSettingsClient { - return NewAutoscaleSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewAutoscaleSettingsClientWithBaseURI creates an instance of the AutoscaleSettingsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). 
-func NewAutoscaleSettingsClientWithBaseURI(baseURI string, subscriptionID string) AutoscaleSettingsClient { - return AutoscaleSettingsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates an autoscale setting. -// Parameters: -// resourceGroupName - the name of the resource group. -// autoscaleSettingName - the autoscale setting name. -// parameters - parameters supplied to the operation. -func (client AutoscaleSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, autoscaleSettingName string, parameters AutoscaleSettingResource) (result AutoscaleSettingResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.AutoscaleSetting", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.AutoscaleSetting.Profiles", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.AutoscaleSetting.Profiles", Name: validation.MaxItems, Rule: 20, Chain: nil}}}, - }}}}}); err != nil { - return result, validation.NewError("insights.AutoscaleSettingsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, autoscaleSettingName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client AutoscaleSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string, parameters AutoscaleSettingResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client AutoscaleSettingsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client AutoscaleSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes and autoscale setting -// Parameters: -// resourceGroupName - the name of the resource group. -// autoscaleSettingName - the autoscale setting name. -func (client AutoscaleSettingsClient) Delete(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, autoscaleSettingName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client AutoscaleSettingsClient) DeletePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client AutoscaleSettingsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. 
-func (client AutoscaleSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets an autoscale setting -// Parameters: -// resourceGroupName - the name of the resource group. -// autoscaleSettingName - the autoscale setting name. -func (client AutoscaleSettingsClient) Get(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result AutoscaleSettingResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, autoscaleSettingName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client AutoscaleSettingsClient) GetPreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client AutoscaleSettingsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client AutoscaleSettingsClient) GetResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup lists the autoscale settings for a resource group -// Parameters: -// resourceGroupName - the name of the resource group. 
-func (client AutoscaleSettingsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.asrc.Response.Response != nil { - sc = result.asrc.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listByResourceGroupNextResults - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.asrc.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result.asrc, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - if result.asrc.hasNextLink() && result.asrc.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. -func (client AutoscaleSettingsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client AutoscaleSettingsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client AutoscaleSettingsClient) ListByResourceGroupResponder(resp *http.Response) (result AutoscaleSettingResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listByResourceGroupNextResults retrieves the next set of results, if any. 
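The listByResourceGroupNextResults/ListByResourceGroupComplete pair below is the usual autorest page-and-iterator pattern. As a hedged sketch (the configured client, the resource group name, and imports of "context", "fmt", and the insights package are assumed), draining the iterator looks roughly like this:

// Illustrative sketch only: listAllAutoscaleSettings drains the iterator
// returned by ListByResourceGroupComplete, which crosses page boundaries
// internally via listByResourceGroupNextResults.
func listAllAutoscaleSettings(ctx context.Context, client insights.AutoscaleSettingsClient, resourceGroup string) error {
	iter, err := client.ListByResourceGroupComplete(ctx, resourceGroup)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		setting := iter.Value() // insights.AutoscaleSettingResource
		fmt.Println(*setting.Name)
		if err := iter.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}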
-func (client AutoscaleSettingsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) { - req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") - } - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. -func (client AutoscaleSettingsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) - return -} - -// ListBySubscription lists the autoscale settings for a subscription -func (client AutoscaleSettingsClient) ListBySubscription(ctx context.Context) (result AutoscaleSettingResourceCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription") - defer func() { - sc := -1 - if result.asrc.Response.Response != nil { - sc = result.asrc.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listBySubscriptionNextResults - req, err := client.ListBySubscriptionPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.asrc.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", resp, "Failure sending request") - return - } - - result.asrc, err = client.ListBySubscriptionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", resp, "Failure responding to request") - return - } - if result.asrc.hasNextLink() && result.asrc.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListBySubscriptionPreparer prepares the ListBySubscription request. 
-func (client AutoscaleSettingsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/autoscalesettings", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionSender sends the ListBySubscription request. The method will close the -// http.Response Body if it receives an error. -func (client AutoscaleSettingsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always -// closes the http.Response Body. -func (client AutoscaleSettingsClient) ListBySubscriptionResponder(resp *http.Response) (result AutoscaleSettingResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listBySubscriptionNextResults retrieves the next set of results, if any. -func (client AutoscaleSettingsClient) listBySubscriptionNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) { - req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request") - } - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. -func (client AutoscaleSettingsClient) ListBySubscriptionComplete(ctx context.Context) (result AutoscaleSettingResourceCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.ListBySubscription(ctx) - return -} - -// Update updates an existing AutoscaleSettingsResource. To update other fields use the CreateOrUpdate method. -// Parameters: -// resourceGroupName - the name of the resource group. -// autoscaleSettingName - the autoscale setting name. -// autoscaleSettingResource - parameters supplied to the operation. 
-func (client AutoscaleSettingsClient) Update(ctx context.Context, resourceGroupName string, autoscaleSettingName string, autoscaleSettingResource AutoscaleSettingResourcePatch) (result AutoscaleSettingResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, autoscaleSettingName, autoscaleSettingResource) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client AutoscaleSettingsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string, autoscaleSettingResource AutoscaleSettingResourcePatch) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), - autorest.WithJSON(autoscaleSettingResource), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client AutoscaleSettingsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. 
-func (client AutoscaleSettingsClient) UpdateResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go deleted file mode 100644 index e660f1093ae..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/client.go +++ /dev/null @@ -1,43 +0,0 @@ -// Deprecated: Please note, this package has been deprecated. A replacement package is available [github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor). We strongly encourage you to upgrade to continue receiving updates. See [Migration Guide](https://aka.ms/azsdk/golang/t2/migration) for guidance on upgrading. Refer to our [deprecation policy](https://azure.github.io/azure-sdk/policies_support.html) for more details. -// -// Package insights implements the Azure ARM Insights service API version . -// -// Monitor Management Client -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/go-autorest/autorest" -) - -const ( - // DefaultBaseURI is the default URI used for the service Insights - DefaultBaseURI = "https://management.azure.com" -) - -// BaseClient is the base client for Insights. -type BaseClient struct { - autorest.Client - BaseURI string - SubscriptionID string -} - -// New creates an instance of the BaseClient client. -func New(subscriptionID string) BaseClient { - return NewWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with -// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { - return BaseClient{ - Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: baseURI, - SubscriptionID: subscriptionID, - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go deleted file mode 100644 index 9708c52c249..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go +++ /dev/null @@ -1,332 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
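For context on the API surface this patch removes: the Preparer/Sender/Responder triplets above are autorest's track-1 request pipeline, client.go supplies the shared BaseClient (with NewWithBaseURI for sovereign clouds and Azure Stack), and ListBySubscriptionComplete flattens the paged collection behind an iterator. A minimal sketch of how a consumer might have called this package before its removal; the subscription ID is a placeholder, the environment-based authorizer is an assumption, and the iterator's NotDone/Value/NextWithContext methods are assumed from the package's (also deleted) models.go:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Placeholder subscription ID; NewAutoscaleSettingsClient follows the
	// same constructor pattern as New/NewWithBaseURI shown in client.go.
	client := insights.NewAutoscaleSettingsClient("00000000-0000-0000-0000-000000000000")

	// Credentials resolved from AZURE_* environment variables (assumed setup).
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// ListBySubscriptionComplete hides the page-by-page plumbing
	// (listBySubscriptionNextResults) behind a flat iterator.
	ctx := context.Background()
	it, err := client.ListBySubscriptionComplete(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for it.NotDone() {
		if name := it.Value().Name; name != nil {
			fmt.Println(*name)
		}
		if err := it.NextWithContext(ctx); err != nil {
			log.Fatal(err)
		}
	}
}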
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// DiagnosticSettingsClient is the monitor Management Client -type DiagnosticSettingsClient struct { - BaseClient -} - -// NewDiagnosticSettingsClient creates an instance of the DiagnosticSettingsClient client. -func NewDiagnosticSettingsClient(subscriptionID string) DiagnosticSettingsClient { - return NewDiagnosticSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDiagnosticSettingsClientWithBaseURI creates an instance of the DiagnosticSettingsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewDiagnosticSettingsClientWithBaseURI(baseURI string, subscriptionID string) DiagnosticSettingsClient { - return DiagnosticSettingsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates diagnostic settings for the specified resource. -// Parameters: -// resourceURI - the identifier of the resource. -// parameters - parameters supplied to the operation. -// name - the name of the diagnostic setting. -func (client DiagnosticSettingsClient) CreateOrUpdate(ctx context.Context, resourceURI string, parameters DiagnosticSettingsResource, name string) (result DiagnosticSettingsResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.CreateOrUpdatePreparer(ctx, resourceURI, parameters, name) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. -func (client DiagnosticSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceURI string, parameters DiagnosticSettingsResource, name string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. 
-func (client DiagnosticSettingsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client DiagnosticSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result DiagnosticSettingsResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes existing diagnostic settings for the specified resource. -// Parameters: -// resourceURI - the identifier of the resource. -// name - the name of the diagnostic setting. -func (client DiagnosticSettingsClient) Delete(ctx context.Context, resourceURI string, name string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceURI, name) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. -func (client DiagnosticSettingsClient) DeletePreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client DiagnosticSettingsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client DiagnosticSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the active diagnostic settings for the specified resource. 
-// Parameters: -// resourceURI - the identifier of the resource. -// name - the name of the diagnostic setting. -func (client DiagnosticSettingsClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceURI, name) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client DiagnosticSettingsClient) GetPreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client DiagnosticSettingsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client DiagnosticSettingsClient) GetResponder(resp *http.Response) (result DiagnosticSettingsResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List gets the active diagnostic settings list for the specified resource. -// Parameters: -// resourceURI - the identifier of the resource. 
-func (client DiagnosticSettingsClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx, resourceURI) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client DiagnosticSettingsClient) ListPreparer(ctx context.Context, resourceURI string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client DiagnosticSettingsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client DiagnosticSettingsClient) ListResponder(resp *http.Response) (result DiagnosticSettingsResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go deleted file mode 100644 index 48365ed67e6..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go +++ /dev/null @@ -1,180 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
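Unlike the autoscale client, the diagnostic-settings operations key off a full ARM resource URI rather than subscription and resource-group path parameters, as the {resourceUri} segment in the preparers above shows. A small sketch of the read path under the same assumptions as before; the resource URI is a placeholder, and the collection's Value field is assumed from the package's (also deleted) models.go:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	client := insights.NewDiagnosticSettingsClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	// Full ARM ID of the target resource (placeholder values).
	resourceURI := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.Storage/storageAccounts/examplesa"

	// List issues a single unpaged GET against
	// /{resourceUri}/providers/microsoft.insights/diagnosticSettings.
	coll, err := client.List(context.Background(), resourceURI)
	if err != nil {
		log.Fatal(err)
	}
	// Value as *[]DiagnosticSettingsResource is assumed from models.go.
	if coll.Value != nil {
		for _, s := range *coll.Value {
			if s.Name != nil {
				fmt.Println(*s.Name)
			}
		}
	}
}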
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// DiagnosticSettingsCategoryClient is the monitor Management Client -type DiagnosticSettingsCategoryClient struct { - BaseClient -} - -// NewDiagnosticSettingsCategoryClient creates an instance of the DiagnosticSettingsCategoryClient client. -func NewDiagnosticSettingsCategoryClient(subscriptionID string) DiagnosticSettingsCategoryClient { - return NewDiagnosticSettingsCategoryClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewDiagnosticSettingsCategoryClientWithBaseURI creates an instance of the DiagnosticSettingsCategoryClient client -// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign -// clouds, Azure stack). -func NewDiagnosticSettingsCategoryClientWithBaseURI(baseURI string, subscriptionID string) DiagnosticSettingsCategoryClient { - return DiagnosticSettingsCategoryClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// Get gets the diagnostic settings category for the specified resource. -// Parameters: -// resourceURI - the identifier of the resource. -// name - the name of the diagnostic setting. -func (client DiagnosticSettingsCategoryClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsCategoryResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceURI, name) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client DiagnosticSettingsCategoryClient) GetPreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "name": autorest.Encode("path", name), - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories/{name}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client DiagnosticSettingsCategoryClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client DiagnosticSettingsCategoryClient) GetResponder(resp *http.Response) (result DiagnosticSettingsCategoryResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List lists the diagnostic settings categories for the specified resource. -// Parameters: -// resourceURI - the identifier of the resource. -func (client DiagnosticSettingsCategoryClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsCategoryResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx, resourceURI) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client DiagnosticSettingsCategoryClient) ListPreparer(ctx context.Context, resourceURI string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceUri": resourceURI, - } - - const APIVersion = "2017-05-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client DiagnosticSettingsCategoryClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. 
-func (client DiagnosticSettingsCategoryClient) ListResponder(resp *http.Response) (result DiagnosticSettingsCategoryResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go deleted file mode 100644 index 1bab3243cf6..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/enums.go +++ /dev/null @@ -1,596 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// AggregationType enumerates the values for aggregation type. -type AggregationType string - -const ( - // Average ... - Average AggregationType = "Average" - // Count ... - Count AggregationType = "Count" - // Maximum ... - Maximum AggregationType = "Maximum" - // Minimum ... - Minimum AggregationType = "Minimum" - // None ... - None AggregationType = "None" - // Total ... - Total AggregationType = "Total" -) - -// PossibleAggregationTypeValues returns an array of possible values for the AggregationType const type. -func PossibleAggregationTypeValues() []AggregationType { - return []AggregationType{Average, Count, Maximum, Minimum, None, Total} -} - -// AlertSeverity enumerates the values for alert severity. -type AlertSeverity string - -const ( - // Four ... - Four AlertSeverity = "4" - // One ... - One AlertSeverity = "1" - // Three ... - Three AlertSeverity = "3" - // Two ... - Two AlertSeverity = "2" - // Zero ... - Zero AlertSeverity = "0" -) - -// PossibleAlertSeverityValues returns an array of possible values for the AlertSeverity const type. -func PossibleAlertSeverityValues() []AlertSeverity { - return []AlertSeverity{Four, One, Three, Two, Zero} -} - -// CategoryType enumerates the values for category type. -type CategoryType string - -const ( - // Logs ... - Logs CategoryType = "Logs" - // Metrics ... - Metrics CategoryType = "Metrics" -) - -// PossibleCategoryTypeValues returns an array of possible values for the CategoryType const type. -func PossibleCategoryTypeValues() []CategoryType { - return []CategoryType{Logs, Metrics} -} - -// ComparisonOperationType enumerates the values for comparison operation type. -type ComparisonOperationType string - -const ( - // Equals ... - Equals ComparisonOperationType = "Equals" - // GreaterThan ... - GreaterThan ComparisonOperationType = "GreaterThan" - // GreaterThanOrEqual ... - GreaterThanOrEqual ComparisonOperationType = "GreaterThanOrEqual" - // LessThan ... - LessThan ComparisonOperationType = "LessThan" - // LessThanOrEqual ... - LessThanOrEqual ComparisonOperationType = "LessThanOrEqual" - // NotEquals ... - NotEquals ComparisonOperationType = "NotEquals" -) - -// PossibleComparisonOperationTypeValues returns an array of possible values for the ComparisonOperationType const type. 
-func PossibleComparisonOperationTypeValues() []ComparisonOperationType { - return []ComparisonOperationType{Equals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, NotEquals} -} - -// ConditionalOperator enumerates the values for conditional operator. -type ConditionalOperator string - -const ( - // ConditionalOperatorEqual ... - ConditionalOperatorEqual ConditionalOperator = "Equal" - // ConditionalOperatorGreaterThan ... - ConditionalOperatorGreaterThan ConditionalOperator = "GreaterThan" - // ConditionalOperatorLessThan ... - ConditionalOperatorLessThan ConditionalOperator = "LessThan" -) - -// PossibleConditionalOperatorValues returns an array of possible values for the ConditionalOperator const type. -func PossibleConditionalOperatorValues() []ConditionalOperator { - return []ConditionalOperator{ConditionalOperatorEqual, ConditionalOperatorGreaterThan, ConditionalOperatorLessThan} -} - -// ConditionOperator enumerates the values for condition operator. -type ConditionOperator string - -const ( - // ConditionOperatorGreaterThan ... - ConditionOperatorGreaterThan ConditionOperator = "GreaterThan" - // ConditionOperatorGreaterThanOrEqual ... - ConditionOperatorGreaterThanOrEqual ConditionOperator = "GreaterThanOrEqual" - // ConditionOperatorLessThan ... - ConditionOperatorLessThan ConditionOperator = "LessThan" - // ConditionOperatorLessThanOrEqual ... - ConditionOperatorLessThanOrEqual ConditionOperator = "LessThanOrEqual" -) - -// PossibleConditionOperatorValues returns an array of possible values for the ConditionOperator const type. -func PossibleConditionOperatorValues() []ConditionOperator { - return []ConditionOperator{ConditionOperatorGreaterThan, ConditionOperatorGreaterThanOrEqual, ConditionOperatorLessThan, ConditionOperatorLessThanOrEqual} -} - -// CriterionType enumerates the values for criterion type. -type CriterionType string - -const ( - // CriterionTypeDynamicThresholdCriterion ... - CriterionTypeDynamicThresholdCriterion CriterionType = "DynamicThresholdCriterion" - // CriterionTypeMultiMetricCriteria ... - CriterionTypeMultiMetricCriteria CriterionType = "MultiMetricCriteria" - // CriterionTypeStaticThresholdCriterion ... - CriterionTypeStaticThresholdCriterion CriterionType = "StaticThresholdCriterion" -) - -// PossibleCriterionTypeValues returns an array of possible values for the CriterionType const type. -func PossibleCriterionTypeValues() []CriterionType { - return []CriterionType{CriterionTypeDynamicThresholdCriterion, CriterionTypeMultiMetricCriteria, CriterionTypeStaticThresholdCriterion} -} - -// DynamicThresholdOperator enumerates the values for dynamic threshold operator. -type DynamicThresholdOperator string - -const ( - // DynamicThresholdOperatorGreaterOrLessThan ... - DynamicThresholdOperatorGreaterOrLessThan DynamicThresholdOperator = "GreaterOrLessThan" - // DynamicThresholdOperatorGreaterThan ... - DynamicThresholdOperatorGreaterThan DynamicThresholdOperator = "GreaterThan" - // DynamicThresholdOperatorLessThan ... - DynamicThresholdOperatorLessThan DynamicThresholdOperator = "LessThan" -) - -// PossibleDynamicThresholdOperatorValues returns an array of possible values for the DynamicThresholdOperator const type. -func PossibleDynamicThresholdOperatorValues() []DynamicThresholdOperator { - return []DynamicThresholdOperator{DynamicThresholdOperatorGreaterOrLessThan, DynamicThresholdOperatorGreaterThan, DynamicThresholdOperatorLessThan} -} - -// DynamicThresholdSensitivity enumerates the values for dynamic threshold sensitivity. 
-type DynamicThresholdSensitivity string - -const ( - // High ... - High DynamicThresholdSensitivity = "High" - // Low ... - Low DynamicThresholdSensitivity = "Low" - // Medium ... - Medium DynamicThresholdSensitivity = "Medium" -) - -// PossibleDynamicThresholdSensitivityValues returns an array of possible values for the DynamicThresholdSensitivity const type. -func PossibleDynamicThresholdSensitivityValues() []DynamicThresholdSensitivity { - return []DynamicThresholdSensitivity{High, Low, Medium} -} - -// Enabled enumerates the values for enabled. -type Enabled string - -const ( - // False ... - False Enabled = "false" - // True ... - True Enabled = "true" -) - -// PossibleEnabledValues returns an array of possible values for the Enabled const type. -func PossibleEnabledValues() []Enabled { - return []Enabled{False, True} -} - -// EventLevel enumerates the values for event level. -type EventLevel string - -const ( - // Critical ... - Critical EventLevel = "Critical" - // Error ... - Error EventLevel = "Error" - // Informational ... - Informational EventLevel = "Informational" - // Verbose ... - Verbose EventLevel = "Verbose" - // Warning ... - Warning EventLevel = "Warning" -) - -// PossibleEventLevelValues returns an array of possible values for the EventLevel const type. -func PossibleEventLevelValues() []EventLevel { - return []EventLevel{Critical, Error, Informational, Verbose, Warning} -} - -// MetricStatisticType enumerates the values for metric statistic type. -type MetricStatisticType string - -const ( - // MetricStatisticTypeAverage ... - MetricStatisticTypeAverage MetricStatisticType = "Average" - // MetricStatisticTypeMax ... - MetricStatisticTypeMax MetricStatisticType = "Max" - // MetricStatisticTypeMin ... - MetricStatisticTypeMin MetricStatisticType = "Min" - // MetricStatisticTypeSum ... - MetricStatisticTypeSum MetricStatisticType = "Sum" -) - -// PossibleMetricStatisticTypeValues returns an array of possible values for the MetricStatisticType const type. -func PossibleMetricStatisticTypeValues() []MetricStatisticType { - return []MetricStatisticType{MetricStatisticTypeAverage, MetricStatisticTypeMax, MetricStatisticTypeMin, MetricStatisticTypeSum} -} - -// MetricTriggerType enumerates the values for metric trigger type. -type MetricTriggerType string - -const ( - // MetricTriggerTypeConsecutive ... - MetricTriggerTypeConsecutive MetricTriggerType = "Consecutive" - // MetricTriggerTypeTotal ... - MetricTriggerTypeTotal MetricTriggerType = "Total" -) - -// PossibleMetricTriggerTypeValues returns an array of possible values for the MetricTriggerType const type. -func PossibleMetricTriggerTypeValues() []MetricTriggerType { - return []MetricTriggerType{MetricTriggerTypeConsecutive, MetricTriggerTypeTotal} -} - -// OdataType enumerates the values for odata type. -type OdataType string - -const ( - // OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource ... - OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource OdataType = "Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource" - // OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource ... - OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource OdataType = "Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource" - // OdataTypeRuleDataSource ... - OdataTypeRuleDataSource OdataType = "RuleDataSource" -) - -// PossibleOdataTypeValues returns an array of possible values for the OdataType const type. 
-func PossibleOdataTypeValues() []OdataType { - return []OdataType{OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource, OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource, OdataTypeRuleDataSource} -} - -// OdataTypeBasicAction enumerates the values for odata type basic action. -type OdataTypeBasicAction string - -const ( - // OdataTypeAction ... - OdataTypeAction OdataTypeBasicAction = "Action" - // OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction ... - OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.AlertingAction" - // OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction ... - OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.LogToMetricAction" -) - -// PossibleOdataTypeBasicActionValues returns an array of possible values for the OdataTypeBasicAction const type. -func PossibleOdataTypeBasicActionValues() []OdataTypeBasicAction { - return []OdataTypeBasicAction{OdataTypeAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction} -} - -// OdataTypeBasicMetricAlertCriteria enumerates the values for odata type basic metric alert criteria. -type OdataTypeBasicMetricAlertCriteria string - -const ( - // OdataTypeMetricAlertCriteria ... - OdataTypeMetricAlertCriteria OdataTypeBasicMetricAlertCriteria = "MetricAlertCriteria" - // OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria ... - OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria" - // OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria ... - OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria" - // OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria ... - OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.WebtestLocationAvailabilityCriteria" -) - -// PossibleOdataTypeBasicMetricAlertCriteriaValues returns an array of possible values for the OdataTypeBasicMetricAlertCriteria const type. -func PossibleOdataTypeBasicMetricAlertCriteriaValues() []OdataTypeBasicMetricAlertCriteria { - return []OdataTypeBasicMetricAlertCriteria{OdataTypeMetricAlertCriteria, OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria, OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria, OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria} -} - -// OdataTypeBasicRuleAction enumerates the values for odata type basic rule action. 
-type OdataTypeBasicRuleAction string - -const ( - // OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction ... - OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction OdataTypeBasicRuleAction = "Microsoft.Azure.Management.Insights.Models.RuleEmailAction" - // OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction ... - OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction OdataTypeBasicRuleAction = "Microsoft.Azure.Management.Insights.Models.RuleWebhookAction" - // OdataTypeRuleAction ... - OdataTypeRuleAction OdataTypeBasicRuleAction = "RuleAction" -) - -// PossibleOdataTypeBasicRuleActionValues returns an array of possible values for the OdataTypeBasicRuleAction const type. -func PossibleOdataTypeBasicRuleActionValues() []OdataTypeBasicRuleAction { - return []OdataTypeBasicRuleAction{OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction, OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction, OdataTypeRuleAction} -} - -// OdataTypeBasicRuleCondition enumerates the values for odata type basic rule condition. -type OdataTypeBasicRuleCondition string - -const ( - // OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition ... - OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition" - // OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition ... - OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition" - // OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition ... - OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition" - // OdataTypeRuleCondition ... - OdataTypeRuleCondition OdataTypeBasicRuleCondition = "RuleCondition" -) - -// PossibleOdataTypeBasicRuleConditionValues returns an array of possible values for the OdataTypeBasicRuleCondition const type. -func PossibleOdataTypeBasicRuleConditionValues() []OdataTypeBasicRuleCondition { - return []OdataTypeBasicRuleCondition{OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition, OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition, OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition, OdataTypeRuleCondition} -} - -// Operator enumerates the values for operator. -type Operator string - -const ( - // OperatorEquals ... - OperatorEquals Operator = "Equals" - // OperatorGreaterThan ... - OperatorGreaterThan Operator = "GreaterThan" - // OperatorGreaterThanOrEqual ... - OperatorGreaterThanOrEqual Operator = "GreaterThanOrEqual" - // OperatorLessThan ... - OperatorLessThan Operator = "LessThan" - // OperatorLessThanOrEqual ... - OperatorLessThanOrEqual Operator = "LessThanOrEqual" - // OperatorNotEquals ... - OperatorNotEquals Operator = "NotEquals" -) - -// PossibleOperatorValues returns an array of possible values for the Operator const type. -func PossibleOperatorValues() []Operator { - return []Operator{OperatorEquals, OperatorGreaterThan, OperatorGreaterThanOrEqual, OperatorLessThan, OperatorLessThanOrEqual, OperatorNotEquals} -} - -// ProvisioningState enumerates the values for provisioning state. -type ProvisioningState string - -const ( - // Canceled ... 
- Canceled ProvisioningState = "Canceled" - // Deploying ... - Deploying ProvisioningState = "Deploying" - // Failed ... - Failed ProvisioningState = "Failed" - // Succeeded ... - Succeeded ProvisioningState = "Succeeded" -) - -// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. -func PossibleProvisioningStateValues() []ProvisioningState { - return []ProvisioningState{Canceled, Deploying, Failed, Succeeded} -} - -// QueryType enumerates the values for query type. -type QueryType string - -const ( - // ResultCount ... - ResultCount QueryType = "ResultCount" -) - -// PossibleQueryTypeValues returns an array of possible values for the QueryType const type. -func PossibleQueryTypeValues() []QueryType { - return []QueryType{ResultCount} -} - -// ReceiverStatus enumerates the values for receiver status. -type ReceiverStatus string - -const ( - // ReceiverStatusDisabled ... - ReceiverStatusDisabled ReceiverStatus = "Disabled" - // ReceiverStatusEnabled ... - ReceiverStatusEnabled ReceiverStatus = "Enabled" - // ReceiverStatusNotSpecified ... - ReceiverStatusNotSpecified ReceiverStatus = "NotSpecified" -) - -// PossibleReceiverStatusValues returns an array of possible values for the ReceiverStatus const type. -func PossibleReceiverStatusValues() []ReceiverStatus { - return []ReceiverStatus{ReceiverStatusDisabled, ReceiverStatusEnabled, ReceiverStatusNotSpecified} -} - -// RecurrenceFrequency enumerates the values for recurrence frequency. -type RecurrenceFrequency string - -const ( - // RecurrenceFrequencyDay ... - RecurrenceFrequencyDay RecurrenceFrequency = "Day" - // RecurrenceFrequencyHour ... - RecurrenceFrequencyHour RecurrenceFrequency = "Hour" - // RecurrenceFrequencyMinute ... - RecurrenceFrequencyMinute RecurrenceFrequency = "Minute" - // RecurrenceFrequencyMonth ... - RecurrenceFrequencyMonth RecurrenceFrequency = "Month" - // RecurrenceFrequencyNone ... - RecurrenceFrequencyNone RecurrenceFrequency = "None" - // RecurrenceFrequencySecond ... - RecurrenceFrequencySecond RecurrenceFrequency = "Second" - // RecurrenceFrequencyWeek ... - RecurrenceFrequencyWeek RecurrenceFrequency = "Week" - // RecurrenceFrequencyYear ... - RecurrenceFrequencyYear RecurrenceFrequency = "Year" -) - -// PossibleRecurrenceFrequencyValues returns an array of possible values for the RecurrenceFrequency const type. -func PossibleRecurrenceFrequencyValues() []RecurrenceFrequency { - return []RecurrenceFrequency{RecurrenceFrequencyDay, RecurrenceFrequencyHour, RecurrenceFrequencyMinute, RecurrenceFrequencyMonth, RecurrenceFrequencyNone, RecurrenceFrequencySecond, RecurrenceFrequencyWeek, RecurrenceFrequencyYear} -} - -// ResultType enumerates the values for result type. -type ResultType string - -const ( - // Data ... - Data ResultType = "Data" - // Metadata ... - Metadata ResultType = "Metadata" -) - -// PossibleResultTypeValues returns an array of possible values for the ResultType const type. -func PossibleResultTypeValues() []ResultType { - return []ResultType{Data, Metadata} -} - -// ScaleDirection enumerates the values for scale direction. -type ScaleDirection string - -const ( - // ScaleDirectionDecrease ... - ScaleDirectionDecrease ScaleDirection = "Decrease" - // ScaleDirectionIncrease ... - ScaleDirectionIncrease ScaleDirection = "Increase" - // ScaleDirectionNone ... - ScaleDirectionNone ScaleDirection = "None" -) - -// PossibleScaleDirectionValues returns an array of possible values for the ScaleDirection const type. 
-func PossibleScaleDirectionValues() []ScaleDirection { - return []ScaleDirection{ScaleDirectionDecrease, ScaleDirectionIncrease, ScaleDirectionNone} -} - -// ScaleRuleMetricDimensionOperationType enumerates the values for scale rule metric dimension operation type. -type ScaleRuleMetricDimensionOperationType string - -const ( - // ScaleRuleMetricDimensionOperationTypeEquals ... - ScaleRuleMetricDimensionOperationTypeEquals ScaleRuleMetricDimensionOperationType = "Equals" - // ScaleRuleMetricDimensionOperationTypeNotEquals ... - ScaleRuleMetricDimensionOperationTypeNotEquals ScaleRuleMetricDimensionOperationType = "NotEquals" -) - -// PossibleScaleRuleMetricDimensionOperationTypeValues returns an array of possible values for the ScaleRuleMetricDimensionOperationType const type. -func PossibleScaleRuleMetricDimensionOperationTypeValues() []ScaleRuleMetricDimensionOperationType { - return []ScaleRuleMetricDimensionOperationType{ScaleRuleMetricDimensionOperationTypeEquals, ScaleRuleMetricDimensionOperationTypeNotEquals} -} - -// ScaleType enumerates the values for scale type. -type ScaleType string - -const ( - // ChangeCount ... - ChangeCount ScaleType = "ChangeCount" - // ExactCount ... - ExactCount ScaleType = "ExactCount" - // PercentChangeCount ... - PercentChangeCount ScaleType = "PercentChangeCount" -) - -// PossibleScaleTypeValues returns an array of possible values for the ScaleType const type. -func PossibleScaleTypeValues() []ScaleType { - return []ScaleType{ChangeCount, ExactCount, PercentChangeCount} -} - -// Sensitivity enumerates the values for sensitivity. -type Sensitivity string - -const ( - // SensitivityHigh ... - SensitivityHigh Sensitivity = "High" - // SensitivityLow ... - SensitivityLow Sensitivity = "Low" - // SensitivityMedium ... - SensitivityMedium Sensitivity = "Medium" -) - -// PossibleSensitivityValues returns an array of possible values for the Sensitivity const type. -func PossibleSensitivityValues() []Sensitivity { - return []Sensitivity{SensitivityHigh, SensitivityLow, SensitivityMedium} -} - -// TimeAggregationOperator enumerates the values for time aggregation operator. -type TimeAggregationOperator string - -const ( - // TimeAggregationOperatorAverage ... - TimeAggregationOperatorAverage TimeAggregationOperator = "Average" - // TimeAggregationOperatorLast ... - TimeAggregationOperatorLast TimeAggregationOperator = "Last" - // TimeAggregationOperatorMaximum ... - TimeAggregationOperatorMaximum TimeAggregationOperator = "Maximum" - // TimeAggregationOperatorMinimum ... - TimeAggregationOperatorMinimum TimeAggregationOperator = "Minimum" - // TimeAggregationOperatorTotal ... - TimeAggregationOperatorTotal TimeAggregationOperator = "Total" -) - -// PossibleTimeAggregationOperatorValues returns an array of possible values for the TimeAggregationOperator const type. -func PossibleTimeAggregationOperatorValues() []TimeAggregationOperator { - return []TimeAggregationOperator{TimeAggregationOperatorAverage, TimeAggregationOperatorLast, TimeAggregationOperatorMaximum, TimeAggregationOperatorMinimum, TimeAggregationOperatorTotal} -} - -// TimeAggregationType enumerates the values for time aggregation type. -type TimeAggregationType string - -const ( - // TimeAggregationTypeAverage ... - TimeAggregationTypeAverage TimeAggregationType = "Average" - // TimeAggregationTypeCount ... - TimeAggregationTypeCount TimeAggregationType = "Count" - // TimeAggregationTypeLast ... - TimeAggregationTypeLast TimeAggregationType = "Last" - // TimeAggregationTypeMaximum ... 
- TimeAggregationTypeMaximum TimeAggregationType = "Maximum" - // TimeAggregationTypeMinimum ... - TimeAggregationTypeMinimum TimeAggregationType = "Minimum" - // TimeAggregationTypeTotal ... - TimeAggregationTypeTotal TimeAggregationType = "Total" -) - -// PossibleTimeAggregationTypeValues returns an array of possible values for the TimeAggregationType const type. -func PossibleTimeAggregationTypeValues() []TimeAggregationType { - return []TimeAggregationType{TimeAggregationTypeAverage, TimeAggregationTypeCount, TimeAggregationTypeLast, TimeAggregationTypeMaximum, TimeAggregationTypeMinimum, TimeAggregationTypeTotal} -} - -// Unit enumerates the values for unit. -type Unit string - -const ( - // UnitBitsPerSecond ... - UnitBitsPerSecond Unit = "BitsPerSecond" - // UnitBytes ... - UnitBytes Unit = "Bytes" - // UnitByteSeconds ... - UnitByteSeconds Unit = "ByteSeconds" - // UnitBytesPerSecond ... - UnitBytesPerSecond Unit = "BytesPerSecond" - // UnitCores ... - UnitCores Unit = "Cores" - // UnitCount ... - UnitCount Unit = "Count" - // UnitCountPerSecond ... - UnitCountPerSecond Unit = "CountPerSecond" - // UnitMilliCores ... - UnitMilliCores Unit = "MilliCores" - // UnitMilliSeconds ... - UnitMilliSeconds Unit = "MilliSeconds" - // UnitNanoCores ... - UnitNanoCores Unit = "NanoCores" - // UnitPercent ... - UnitPercent Unit = "Percent" - // UnitSeconds ... - UnitSeconds Unit = "Seconds" - // UnitUnspecified ... - UnitUnspecified Unit = "Unspecified" -) - -// PossibleUnitValues returns an array of possible values for the Unit const type. -func PossibleUnitValues() []Unit { - return []Unit{UnitBitsPerSecond, UnitBytes, UnitByteSeconds, UnitBytesPerSecond, UnitCores, UnitCount, UnitCountPerSecond, UnitMilliCores, UnitMilliSeconds, UnitNanoCores, UnitPercent, UnitSeconds, UnitUnspecified} -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go deleted file mode 100644 index 86e59b42436..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go +++ /dev/null @@ -1,99 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// EventCategoriesClient is the monitor Management Client -type EventCategoriesClient struct { - BaseClient -} - -// NewEventCategoriesClient creates an instance of the EventCategoriesClient client. -func NewEventCategoriesClient(subscriptionID string) EventCategoriesClient { - return NewEventCategoriesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewEventCategoriesClientWithBaseURI creates an instance of the EventCategoriesClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). 
-func NewEventCategoriesClientWithBaseURI(baseURI string, subscriptionID string) EventCategoriesClient {
-	return EventCategoriesClient{NewWithBaseURI(baseURI, subscriptionID)}
-}
-
-// List get the list of available event categories supported in the Activity Logs Service.<br>The current list includes
-// the following: Administrative, Security, ServiceHealth, Alert, Recommendation, Policy.
-func (client EventCategoriesClient) List(ctx context.Context) (result EventCategoryCollection, err error) {
-	if tracing.IsEnabled() {
-		ctx = tracing.StartSpan(ctx, fqdn+"/EventCategoriesClient.List")
-		defer func() {
-			sc := -1
-			if result.Response.Response != nil {
-				sc = result.Response.Response.StatusCode
-			}
-			tracing.EndSpan(ctx, sc, err)
-		}()
-	}
-	req, err := client.ListPreparer(ctx)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", nil, "Failure preparing request")
-		return
-	}
-
-	resp, err := client.ListSender(req)
-	if err != nil {
-		result.Response = autorest.Response{Response: resp}
-		err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", resp, "Failure sending request")
-		return
-	}
-
-	result, err = client.ListResponder(resp)
-	if err != nil {
-		err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", resp, "Failure responding to request")
-		return
-	}
-
-	return
-}
-
-// ListPreparer prepares the List request.
-func (client EventCategoriesClient) ListPreparer(ctx context.Context) (*http.Request, error) {
-	const APIVersion = "2015-04-01"
-	queryParameters := map[string]interface{}{
-		"api-version": APIVersion,
-	}
-
-	preparer := autorest.CreatePreparer(
-		autorest.AsGet(),
-		autorest.WithBaseURL(client.BaseURI),
-		autorest.WithPath("/providers/microsoft.insights/eventcategories"),
-		autorest.WithQueryParameters(queryParameters))
-	return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client EventCategoriesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
-}
-
-// ListResponder handles the response to the List request. The method always
-// closes the http.Response Body.
-func (client EventCategoriesClient) ListResponder(resp *http.Response) (result EventCategoryCollection, err error) {
-	err = autorest.Respond(
-		resp,
-		azure.WithErrorUnlessStatusCode(http.StatusOK),
-		autorest.ByUnmarshallingJSON(&result),
-		autorest.ByClosing())
-	result.Response = autorest.Response{Response: resp}
-	return
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go
deleted file mode 100644
index ad1500b87bc..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go
+++ /dev/null
@@ -1,418 +0,0 @@
-package insights
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-//
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
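Two more pieces of the removed surface are worth a combined illustration: eventcategories.go's List is a single unpaged GET whose URL carries no subscription scope, and every Possible*Values helper in enums.go shares one shape, a typed string constant set plus a function enumerating its members. A sketch exercising both, under the same placeholder-credentials assumptions as the earlier examples:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
	"github.com/Azure/go-autorest/autorest/azure/auth"
)

func main() {
	// Enumerate one of the const sets deleted in enums.go above.
	for _, agg := range insights.PossibleAggregationTypeValues() {
		fmt.Println(string(agg))
	}

	// The event-category URL (/providers/microsoft.insights/eventcategories)
	// needs no subscription, but the constructor still takes one (placeholder).
	client := insights.NewEventCategoriesClient("00000000-0000-0000-0000-000000000000")
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	client.Authorizer = authorizer

	cats, err := client.List(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// EventCategoryCollection embeds autorest.Response, as set in ListResponder.
	fmt.Println(cats.Response.StatusCode)
}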
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// LogProfilesClient is the monitor Management Client -type LogProfilesClient struct { - BaseClient -} - -// NewLogProfilesClient creates an instance of the LogProfilesClient client. -func NewLogProfilesClient(subscriptionID string) LogProfilesClient { - return NewLogProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewLogProfilesClientWithBaseURI creates an instance of the LogProfilesClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewLogProfilesClientWithBaseURI(baseURI string, subscriptionID string) LogProfilesClient { - return LogProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create or update a log profile in Azure Monitoring REST API. -// Parameters: -// logProfileName - the name of the log profile. -// parameters - parameters supplied to the operation. -func (client LogProfilesClient) CreateOrUpdate(ctx context.Context, logProfileName string, parameters LogProfileResource) (result LogProfileResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.LogProfileProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.Locations", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.LogProfileProperties.Categories", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.LogProfileProperties.RetentionPolicy", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.RetentionPolicy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.LogProfileProperties.RetentionPolicy.Days", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.RetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: int64(0), Chain: nil}}}, - }}, - }}}}}); err != nil { - return result, validation.NewError("insights.LogProfilesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, logProfileName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client LogProfilesClient) CreateOrUpdatePreparer(ctx context.Context, logProfileName string, parameters LogProfileResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "logProfileName": autorest.Encode("path", logProfileName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client LogProfilesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client LogProfilesClient) CreateOrUpdateResponder(resp *http.Response) (result LogProfileResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes the log profile. -// Parameters: -// logProfileName - the name of the log profile. -func (client LogProfilesClient) Delete(ctx context.Context, logProfileName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, logProfileName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client LogProfilesClient) DeletePreparer(ctx context.Context, logProfileName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "logProfileName": autorest.Encode("path", logProfileName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client LogProfilesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client LogProfilesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets the log profile. -// Parameters: -// logProfileName - the name of the log profile. -func (client LogProfilesClient) Get(ctx context.Context, logProfileName string) (result LogProfileResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, logProfileName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client LogProfilesClient) GetPreparer(ctx context.Context, logProfileName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "logProfileName": autorest.Encode("path", logProfileName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. 
-func (client LogProfilesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client LogProfilesClient) GetResponder(resp *http.Response) (result LogProfileResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// List list the log profiles. -func (client LogProfilesClient) List(ctx context.Context) (result LogProfileCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client LogProfilesClient) ListPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client LogProfilesClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client LogProfilesClient) ListResponder(resp *http.Response) (result LogProfileCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update updates an existing LogProfilesResource. To update other fields use the CreateOrUpdate method. -// Parameters: -// logProfileName - the name of the log profile. -// logProfilesResource - parameters supplied to the operation. 
-func (client LogProfilesClient) Update(ctx context.Context, logProfileName string, logProfilesResource LogProfileResourcePatch) (result LogProfileResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, logProfileName, logProfilesResource) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. -func (client LogProfilesClient) UpdatePreparer(ctx context.Context, logProfileName string, logProfilesResource LogProfileResourcePatch) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "logProfileName": autorest.Encode("path", logProfileName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2016-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), - autorest.WithJSON(logProfilesResource), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client LogProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client LogProfilesClient) UpdateResponder(resp *http.Response) (result LogProfileResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go deleted file mode 100644 index 94231b6fa34..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go +++ /dev/null @@ -1,498 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MetricAlertsClient is the monitor Management Client -type MetricAlertsClient struct { - BaseClient -} - -// NewMetricAlertsClient creates an instance of the MetricAlertsClient client. -func NewMetricAlertsClient(subscriptionID string) MetricAlertsClient { - return NewMetricAlertsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMetricAlertsClientWithBaseURI creates an instance of the MetricAlertsClient client using a custom endpoint. Use -// this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewMetricAlertsClientWithBaseURI(baseURI string, subscriptionID string) MetricAlertsClient { - return MetricAlertsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate create or update an metric alert definition. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// parameters - the parameters of the rule to create or update. -func (client MetricAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource) (result MetricAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.MetricAlertProperties", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.MetricAlertProperties.Description", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.MetricAlertProperties.Severity", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.MetricAlertProperties.Enabled", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.MetricAlertProperties.EvaluationFrequency", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.MetricAlertProperties.WindowSize", Name: validation.Null, Rule: true, Chain: nil}, - }}}}}); err != nil { - return result, validation.NewError("insights.MetricAlertsClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client MetricAlertsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) CreateOrUpdateResponder(resp *http.Response) (result MetricAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete delete an alert rule definition. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client MetricAlertsClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client MetricAlertsClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get retrieve an alert rule definition. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client MetricAlertsClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client MetricAlertsClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) GetResponder(resp *http.Response) (result MetricAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup retrieve alert rule definitions in a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -func (client MetricAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result MetricAlertResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client MetricAlertsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) ListByResourceGroupResponder(resp *http.Response) (result MetricAlertResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscription retrieve alert rule definitions in a subscription. -func (client MetricAlertsClient) ListBySubscription(ctx context.Context) (result MetricAlertResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListBySubscription") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListBySubscriptionPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", resp, "Failure sending request") - return - } - - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", resp, "Failure responding to request") - return - } - - return -} - -// ListBySubscriptionPreparer prepares the ListBySubscription request. 
-func (client MetricAlertsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Insights/metricAlerts", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionSender sends the ListBySubscription request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) ListBySubscriptionResponder(resp *http.Response) (result MetricAlertResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update update an metric alert definition. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// parameters - the parameters of the rule to update. -func (client MetricAlertsClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch) (result MetricAlertResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client MetricAlertsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client MetricAlertsClient) UpdateResponder(resp *http.Response) (result MetricAlertResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go deleted file mode 100644 index 43cfaed4276..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go +++ /dev/null @@ -1,186 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MetricAlertsStatusClient is the monitor Management Client -type MetricAlertsStatusClient struct { - BaseClient -} - -// NewMetricAlertsStatusClient creates an instance of the MetricAlertsStatusClient client. -func NewMetricAlertsStatusClient(subscriptionID string) MetricAlertsStatusClient { - return NewMetricAlertsStatusClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMetricAlertsStatusClientWithBaseURI creates an instance of the MetricAlertsStatusClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewMetricAlertsStatusClientWithBaseURI(baseURI string, subscriptionID string) MetricAlertsStatusClient { - return MetricAlertsStatusClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List retrieve an alert rule status. 
-// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client MetricAlertsStatusClient) List(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertStatusCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client MetricAlertsStatusClient) ListPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsStatusClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client MetricAlertsStatusClient) ListResponder(resp *http.Response) (result MetricAlertStatusCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByName retrieve an alert rule status. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// statusName - the name of the status. 
-func (client MetricAlertsStatusClient) ListByName(ctx context.Context, resourceGroupName string, ruleName string, statusName string) (result MetricAlertStatusCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.ListByName") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByNamePreparer(ctx, resourceGroupName, ruleName, statusName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", nil, "Failure preparing request") - return - } - - resp, err := client.ListByNameSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", resp, "Failure sending request") - return - } - - result, err = client.ListByNameResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", resp, "Failure responding to request") - return - } - - return -} - -// ListByNamePreparer prepares the ListByName request. -func (client MetricAlertsStatusClient) ListByNamePreparer(ctx context.Context, resourceGroupName string, ruleName string, statusName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "statusName": autorest.Encode("path", statusName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-03-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status/{statusName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByNameSender sends the ListByName request. The method will close the -// http.Response Body if it receives an error. -func (client MetricAlertsStatusClient) ListByNameSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByNameResponder handles the response to the ListByName request. The method always -// closes the http.Response Body. -func (client MetricAlertsStatusClient) ListByNameResponder(resp *http.Response) (result MetricAlertStatusCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go deleted file mode 100644 index d395fc67bc5..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go +++ /dev/null @@ -1,217 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MetricBaselineClient is the monitor Management Client -type MetricBaselineClient struct { - BaseClient -} - -// NewMetricBaselineClient creates an instance of the MetricBaselineClient client. -func NewMetricBaselineClient(subscriptionID string) MetricBaselineClient { - return NewMetricBaselineClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMetricBaselineClientWithBaseURI creates an instance of the MetricBaselineClient client using a custom endpoint. -// Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewMetricBaselineClientWithBaseURI(baseURI string, subscriptionID string) MetricBaselineClient { - return MetricBaselineClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CalculateBaseline **Lists the baseline values for a resource**. -// Parameters: -// resourceURI - the identifier of the resource. It has the following structure: -// subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}. -// For example: -// subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1 -// timeSeriesInformation - information that need to be specified to calculate a baseline on a time series. -func (client MetricBaselineClient) CalculateBaseline(ctx context.Context, resourceURI string, timeSeriesInformation TimeSeriesInformation) (result CalculateBaselineResponse, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.CalculateBaseline") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: timeSeriesInformation, - Constraints: []validation.Constraint{{Target: "timeSeriesInformation.Sensitivities", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "timeSeriesInformation.Values", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { - return result, validation.NewError("insights.MetricBaselineClient", "CalculateBaseline", err.Error()) - } - - req, err := client.CalculateBaselinePreparer(ctx, resourceURI, timeSeriesInformation) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", nil, "Failure preparing request") - return - } - - resp, err := client.CalculateBaselineSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", resp, "Failure sending request") - return - } - - result, err = client.CalculateBaselineResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", resp, "Failure responding to request") - return - } - - return -} - -// CalculateBaselinePreparer prepares the CalculateBaseline request. 
-func (client MetricBaselineClient) CalculateBaselinePreparer(ctx context.Context, resourceURI string, timeSeriesInformation TimeSeriesInformation) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceUri": resourceURI, - } - - const APIVersion = "2017-11-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPost(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/calculatebaseline", pathParameters), - autorest.WithJSON(timeSeriesInformation), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CalculateBaselineSender sends the CalculateBaseline request. The method will close the -// http.Response Body if it receives an error. -func (client MetricBaselineClient) CalculateBaselineSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// CalculateBaselineResponder handles the response to the CalculateBaseline request. The method always -// closes the http.Response Body. -func (client MetricBaselineClient) CalculateBaselineResponder(resp *http.Response) (result CalculateBaselineResponse, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Get **Gets the baseline values for a specific metric**. -// Parameters: -// resourceURI - the identifier of the resource. It has the following structure: -// subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}. -// For example: -// subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1 -// metricName - the name of the metric to retrieve the baseline for. -// timespan - the timespan of the query. It is a string with the following format -// 'startDateTime_ISO/endDateTime_ISO'. -// interval - the interval (i.e. timegrain) of the query. -// aggregation - the aggregation type of the metric to retrieve the baseline for. -// sensitivities - the list of sensitivities (comma separated) to retrieve. -// resultType - allows retrieving only metadata of the baseline. On data request all information is retrieved. 
-func (client MetricBaselineClient) Get(ctx context.Context, resourceURI string, metricName string, timespan string, interval *string, aggregation string, sensitivities string, resultType ResultType) (result BaselineResponse, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceURI, metricName, timespan, interval, aggregation, sensitivities, resultType) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. -func (client MetricBaselineClient) GetPreparer(ctx context.Context, resourceURI string, metricName string, timespan string, interval *string, aggregation string, sensitivities string, resultType ResultType) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "metricName": autorest.Encode("path", metricName), - "resourceUri": resourceURI, - } - - const APIVersion = "2017-11-01-preview" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(timespan) > 0 { - queryParameters["timespan"] = autorest.Encode("query", timespan) - } - if interval != nil { - queryParameters["interval"] = autorest.Encode("query", *interval) - } - if len(aggregation) > 0 { - queryParameters["aggregation"] = autorest.Encode("query", aggregation) - } - if len(sensitivities) > 0 { - queryParameters["sensitivities"] = autorest.Encode("query", sensitivities) - } - if len(string(resultType)) > 0 { - queryParameters["resultType"] = autorest.Encode("query", resultType) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/baseline/{metricName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client MetricBaselineClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. 
-func (client MetricBaselineClient) GetResponder(resp *http.Response) (result BaselineResponse, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go deleted file mode 100644 index 69fc2e0b465..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go +++ /dev/null @@ -1,109 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MetricDefinitionsClient is the monitor Management Client -type MetricDefinitionsClient struct { - BaseClient -} - -// NewMetricDefinitionsClient creates an instance of the MetricDefinitionsClient client. -func NewMetricDefinitionsClient(subscriptionID string) MetricDefinitionsClient { - return NewMetricDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMetricDefinitionsClientWithBaseURI creates an instance of the MetricDefinitionsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewMetricDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) MetricDefinitionsClient { - return MetricDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists the metric definitions for the resource. -// Parameters: -// resourceURI - the identifier of the resource. -// metricnamespace - metric namespace to query metric definitions for. -func (client MetricDefinitionsClient) List(ctx context.Context, resourceURI string, metricnamespace string) (result MetricDefinitionCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/MetricDefinitionsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx, resourceURI, metricnamespace) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. 
-func (client MetricDefinitionsClient) ListPreparer(ctx context.Context, resourceURI string, metricnamespace string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceUri": resourceURI, - } - - const APIVersion = "2018-01-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(metricnamespace) > 0 { - queryParameters["metricnamespace"] = autorest.Encode("query", metricnamespace) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metricDefinitions", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client MetricDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client MetricDefinitionsClient) ListResponder(resp *http.Response) (result MetricDefinitionCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go deleted file mode 100644 index 97687a09b49..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go +++ /dev/null @@ -1,152 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// MetricsClient is the monitor Management Client -type MetricsClient struct { - BaseClient -} - -// NewMetricsClient creates an instance of the MetricsClient client. -func NewMetricsClient(subscriptionID string) MetricsClient { - return NewMetricsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewMetricsClientWithBaseURI creates an instance of the MetricsClient client using a custom endpoint. Use this when -// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewMetricsClientWithBaseURI(baseURI string, subscriptionID string) MetricsClient { - return MetricsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List **Lists the metric values for a resource**. -// Parameters: -// resourceURI - the identifier of the resource. -// timespan - the timespan of the query. It is a string with the following format -// 'startDateTime_ISO/endDateTime_ISO'. -// interval - the interval (i.e. timegrain) of the query. 
-// metricnames - the names of the metrics (comma separated) to retrieve.
-// aggregation - the list of aggregation types (comma separated) to retrieve.
-// top - the maximum number of records to retrieve.
-// Valid only if $filter is specified.
-// Defaults to 10.
-// orderby - the aggregation to use for sorting results and the direction of the sort.
-// Only one order can be specified.
-// Examples: sum asc.
-// filter - the **$filter** is used to reduce the set of metric data returned.<br>Example:<br>Metric contains
-// metadata A, B and C.<br>- Return all time series of C where A = a1 and B = b1 or b2<br>**$filter=A eq ‘a1’
-// and B eq ‘b1’ or B eq ‘b2’ and C eq ‘*’**<br>- Invalid variant:<br>**$filter=A eq ‘a1’ and B eq ‘b1’ and C
-// eq ‘*’ or B = ‘b2’**<br>This is invalid because the logical or operator cannot separate two different
-// metadata names.<br>- Return all time series where A = a1, B = b1 and C = c1:<br>**$filter=A eq ‘a1’ and B eq
-// ‘b1’ and C eq ‘c1’**<br>- Return all time series where A = a1<br>**$filter=A eq ‘a1’ and B eq ‘*’ and C eq
-// ‘*’**.
-// resultType - reduces the set of data collected. The syntax allowed depends on the operation. See the
-// operation's description for details.
-// metricnamespace - metric namespace to query metric definitions for.
-func (client MetricsClient) List(ctx context.Context, resourceURI string, timespan string, interval *string, metricnames string, aggregation string, top *int32, orderby string, filter string, resultType ResultType, metricnamespace string) (result Response, err error) {
- if tracing.IsEnabled() {
- ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.List")
- defer func() {
- sc := -1
- if result.Response.Response != nil {
- sc = result.Response.Response.StatusCode
- }
- tracing.EndSpan(ctx, sc, err)
- }()
- }
- req, err := client.ListPreparer(ctx, resourceURI, timespan, interval, metricnames, aggregation, top, orderby, filter, resultType, metricnamespace)
- if err != nil {
- err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", nil, "Failure preparing request")
- return
- }
-
- resp, err := client.ListSender(req)
- if err != nil {
- result.Response = autorest.Response{Response: resp}
- err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", resp, "Failure sending request")
- return
- }
-
- result, err = client.ListResponder(resp)
- if err != nil {
- err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", resp, "Failure responding to request")
- return
- }
-
- return
-}
-
-// ListPreparer prepares the List request.
-func (client MetricsClient) ListPreparer(ctx context.Context, resourceURI string, timespan string, interval *string, metricnames string, aggregation string, top *int32, orderby string, filter string, resultType ResultType, metricnamespace string) (*http.Request, error) {
- pathParameters := map[string]interface{}{
- "resourceUri": resourceURI,
- }
-
- const APIVersion = "2018-01-01"
- queryParameters := map[string]interface{}{
- "api-version": APIVersion,
- }
- if len(timespan) > 0 {
- queryParameters["timespan"] = autorest.Encode("query", timespan)
- }
- if interval != nil {
- queryParameters["interval"] = autorest.Encode("query", *interval)
- }
- if len(metricnames) > 0 {
- queryParameters["metricnames"] = autorest.Encode("query", metricnames)
- }
- if len(aggregation) > 0 {
- queryParameters["aggregation"] = autorest.Encode("query", aggregation)
- }
- if top != nil {
- queryParameters["top"] = autorest.Encode("query", *top)
- }
- if len(orderby) > 0 {
- queryParameters["orderby"] = autorest.Encode("query", orderby)
- }
- if len(filter) > 0 {
- queryParameters["$filter"] = autorest.Encode("query", filter)
- }
- if len(string(resultType)) > 0 {
- queryParameters["resultType"] = autorest.Encode("query", resultType)
- }
- if len(metricnamespace) > 0 {
- queryParameters["metricnamespace"] = autorest.Encode("query", metricnamespace)
- }
-
- preparer := autorest.CreatePreparer(
- autorest.AsGet(),
- autorest.WithBaseURL(client.BaseURI),
- autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metrics", pathParameters),
- autorest.WithQueryParameters(queryParameters))
- return preparer.Prepare((&http.Request{}).WithContext(ctx))
-}
-
-// ListSender sends the List request. The method will close the
-// http.Response Body if it receives an error.
-func (client MetricsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client MetricsClient) ListResponder(resp *http.Response) (result Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go deleted file mode 100644 index f9cabd91d30..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go +++ /dev/null @@ -1,5204 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/json" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/autorest/to" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// The package's fully qualified name. -const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights" - -// BasicAction action descriptor. -type BasicAction interface { - AsAlertingAction() (*AlertingAction, bool) - AsLogToMetricAction() (*LogToMetricAction, bool) - AsAction() (*Action, bool) -} - -// Action action descriptor. 
-type Action struct {
- // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
- OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
-}
-
-func unmarshalBasicAction(body []byte) (BasicAction, error) {
- var m map[string]interface{}
- err := json.Unmarshal(body, &m)
- if err != nil {
- return nil, err
- }
-
- switch m["odata.type"] {
- case string(OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction):
- var aa AlertingAction
- err := json.Unmarshal(body, &aa)
- return aa, err
- case string(OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction):
- var ltma LogToMetricAction
- err := json.Unmarshal(body, &ltma)
- return ltma, err
- default:
- var a Action
- err := json.Unmarshal(body, &a)
- return a, err
- }
-}
-func unmarshalBasicActionArray(body []byte) ([]BasicAction, error) {
- var rawMessages []*json.RawMessage
- err := json.Unmarshal(body, &rawMessages)
- if err != nil {
- return nil, err
- }
-
- aArray := make([]BasicAction, len(rawMessages))
-
- for index, rawMessage := range rawMessages {
- a, err := unmarshalBasicAction(*rawMessage)
- if err != nil {
- return nil, err
- }
- aArray[index] = a
- }
- return aArray, nil
-}
-
-// MarshalJSON is the custom marshaler for Action.
-func (a Action) MarshalJSON() ([]byte, error) {
- a.OdataType = OdataTypeAction
- objectMap := make(map[string]interface{})
- if a.OdataType != "" {
- objectMap["odata.type"] = a.OdataType
- }
- return json.Marshal(objectMap)
-}
-
-// AsAlertingAction is the BasicAction implementation for Action.
-func (a Action) AsAlertingAction() (*AlertingAction, bool) {
- return nil, false
-}
-
-// AsLogToMetricAction is the BasicAction implementation for Action.
-func (a Action) AsLogToMetricAction() (*LogToMetricAction, bool) {
- return nil, false
-}
-
-// AsAction is the BasicAction implementation for Action.
-func (a Action) AsAction() (*Action, bool) {
- return &a, true
-}
-
-// AsBasicAction is the BasicAction implementation for Action.
-func (a Action) AsBasicAction() (BasicAction, bool) {
- return &a, true
-}
-
-// ActionGroup an Azure action group.
-type ActionGroup struct {
- // GroupShortName - The short name of the action group. This will be used in SMS messages.
- GroupShortName *string `json:"groupShortName,omitempty"`
- // Enabled - Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
- Enabled *bool `json:"enabled,omitempty"`
- // EmailReceivers - The list of email receivers that are part of this action group.
- EmailReceivers *[]EmailReceiver `json:"emailReceivers,omitempty"`
- // SmsReceivers - The list of SMS receivers that are part of this action group.
- SmsReceivers *[]SmsReceiver `json:"smsReceivers,omitempty"`
- // WebhookReceivers - The list of webhook receivers that are part of this action group.
- WebhookReceivers *[]WebhookReceiver `json:"webhookReceivers,omitempty"`
- // ItsmReceivers - The list of ITSM receivers that are part of this action group.
- ItsmReceivers *[]ItsmReceiver `json:"itsmReceivers,omitempty"` - // AzureAppPushReceivers - The list of AzureAppPush receivers that are part of this action group. - AzureAppPushReceivers *[]AzureAppPushReceiver `json:"azureAppPushReceivers,omitempty"` - // AutomationRunbookReceivers - The list of AutomationRunbook receivers that are part of this action group. - AutomationRunbookReceivers *[]AutomationRunbookReceiver `json:"automationRunbookReceivers,omitempty"` - // VoiceReceivers - The list of voice receivers that are part of this action group. - VoiceReceivers *[]VoiceReceiver `json:"voiceReceivers,omitempty"` - // LogicAppReceivers - The list of logic app receivers that are part of this action group. - LogicAppReceivers *[]LogicAppReceiver `json:"logicAppReceivers,omitempty"` - // AzureFunctionReceivers - The list of azure function receivers that are part of this action group. - AzureFunctionReceivers *[]AzureFunctionReceiver `json:"azureFunctionReceivers,omitempty"` -} - -// ActionGroupList a list of action groups. -type ActionGroupList struct { - autorest.Response `json:"-"` - // Value - The list of action groups. - Value *[]ActionGroupResource `json:"value,omitempty"` - // NextLink - Provides the link to retrieve the next set of elements. - NextLink *string `json:"nextLink,omitempty"` -} - -// ActionGroupPatch an Azure action group for patch operations. -type ActionGroupPatch struct { - // Enabled - Indicates whether this action group is enabled. If an action group is not enabled, then none of its actions will be activated. - Enabled *bool `json:"enabled,omitempty"` -} - -// ActionGroupPatchBody an action group object for the body of patch operations. -type ActionGroupPatchBody struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // ActionGroupPatch - The action group settings for an update operation. - *ActionGroupPatch `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for ActionGroupPatchBody. -func (agpb ActionGroupPatchBody) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if agpb.Tags != nil { - objectMap["tags"] = agpb.Tags - } - if agpb.ActionGroupPatch != nil { - objectMap["properties"] = agpb.ActionGroupPatch - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ActionGroupPatchBody struct. -func (agpb *ActionGroupPatchBody) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - agpb.Tags = tags - } - case "properties": - if v != nil { - var actionGroupPatch ActionGroupPatch - err = json.Unmarshal(*v, &actionGroupPatch) - if err != nil { - return err - } - agpb.ActionGroupPatch = &actionGroupPatch - } - } - } - - return nil -} - -// ActionGroupResource an action group resource. -type ActionGroupResource struct { - autorest.Response `json:"-"` - // ActionGroup - The action groups properties of the resource. 
- *ActionGroup `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for ActionGroupResource. -func (agr ActionGroupResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if agr.ActionGroup != nil { - objectMap["properties"] = agr.ActionGroup - } - if agr.Location != nil { - objectMap["location"] = agr.Location - } - if agr.Tags != nil { - objectMap["tags"] = agr.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ActionGroupResource struct. -func (agr *ActionGroupResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var actionGroup ActionGroup - err = json.Unmarshal(*v, &actionGroup) - if err != nil { - return err - } - agr.ActionGroup = &actionGroup - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - agr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - agr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - agr.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - agr.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - agr.Tags = tags - } - } - } - - return nil -} - -// ActivityLogAlert an Azure activity log alert. -type ActivityLogAlert struct { - // Scopes - A list of resourceIds that will be used as prefixes. The alert will only apply to activityLogs with resourceIds that fall under one of these prefixes. This list must include at least one item. - Scopes *[]string `json:"scopes,omitempty"` - // Enabled - Indicates whether this activity log alert is enabled. If an activity log alert is not enabled, then none of its actions will be activated. - Enabled *bool `json:"enabled,omitempty"` - // Condition - The condition that will cause this alert to activate. - Condition *ActivityLogAlertAllOfCondition `json:"condition,omitempty"` - // Actions - The actions that will activate when the condition is met. - Actions *ActivityLogAlertActionList `json:"actions,omitempty"` - // Description - A description of this activity log alert. - Description *string `json:"description,omitempty"` -} - -// ActivityLogAlertActionGroup a pointer to an Azure Action Group. -type ActivityLogAlertActionGroup struct { - // ActionGroupID - The resourceId of the action group. This cannot be null or empty. - ActionGroupID *string `json:"actionGroupId,omitempty"` - // WebhookProperties - the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload. 
- WebhookProperties map[string]*string `json:"webhookProperties"` -} - -// MarshalJSON is the custom marshaler for ActivityLogAlertActionGroup. -func (alaag ActivityLogAlertActionGroup) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if alaag.ActionGroupID != nil { - objectMap["actionGroupId"] = alaag.ActionGroupID - } - if alaag.WebhookProperties != nil { - objectMap["webhookProperties"] = alaag.WebhookProperties - } - return json.Marshal(objectMap) -} - -// ActivityLogAlertActionList a list of activity log alert actions. -type ActivityLogAlertActionList struct { - // ActionGroups - The list of activity log alerts. - ActionGroups *[]ActivityLogAlertActionGroup `json:"actionGroups,omitempty"` -} - -// ActivityLogAlertAllOfCondition an Activity Log alert condition that is met when all its member -// conditions are met. -type ActivityLogAlertAllOfCondition struct { - // AllOf - The list of activity log alert conditions. - AllOf *[]ActivityLogAlertLeafCondition `json:"allOf,omitempty"` -} - -// ActivityLogAlertLeafCondition an Activity Log alert condition that is met by comparing an activity log -// field and value. -type ActivityLogAlertLeafCondition struct { - // Field - The name of the field that this condition will examine. The possible values for this field are (case-insensitive): 'resourceId', 'category', 'caller', 'level', 'operationName', 'resourceGroup', 'resourceProvider', 'status', 'subStatus', 'resourceType', or anything beginning with 'properties.'. - Field *string `json:"field,omitempty"` - // Equals - The field value will be compared to this value (case-insensitive) to determine if the condition is met. - Equals *string `json:"equals,omitempty"` -} - -// ActivityLogAlertList a list of activity log alerts. -type ActivityLogAlertList struct { - autorest.Response `json:"-"` - // Value - The list of activity log alerts. - Value *[]ActivityLogAlertResource `json:"value,omitempty"` - // NextLink - Provides the link to retrieve the next set of elements. - NextLink *string `json:"nextLink,omitempty"` -} - -// ActivityLogAlertPatch an Azure activity log alert for patch operations. -type ActivityLogAlertPatch struct { - // Enabled - Indicates whether this activity log alert is enabled. If an activity log alert is not enabled, then none of its actions will be activated. - Enabled *bool `json:"enabled,omitempty"` -} - -// ActivityLogAlertPatchBody an activity log alert object for the body of patch operations. -type ActivityLogAlertPatchBody struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // ActivityLogAlertPatch - The activity log alert settings for an update operation. - *ActivityLogAlertPatch `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for ActivityLogAlertPatchBody. -func (alapb ActivityLogAlertPatchBody) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if alapb.Tags != nil { - objectMap["tags"] = alapb.Tags - } - if alapb.ActivityLogAlertPatch != nil { - objectMap["properties"] = alapb.ActivityLogAlertPatch - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ActivityLogAlertPatchBody struct. 
-func (alapb *ActivityLogAlertPatchBody) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - alapb.Tags = tags - } - case "properties": - if v != nil { - var activityLogAlertPatch ActivityLogAlertPatch - err = json.Unmarshal(*v, &activityLogAlertPatch) - if err != nil { - return err - } - alapb.ActivityLogAlertPatch = &activityLogAlertPatch - } - } - } - - return nil -} - -// ActivityLogAlertResource an activity log alert resource. -type ActivityLogAlertResource struct { - autorest.Response `json:"-"` - // ActivityLogAlert - The activity log alert properties of the resource. - *ActivityLogAlert `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for ActivityLogAlertResource. -func (alar ActivityLogAlertResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if alar.ActivityLogAlert != nil { - objectMap["properties"] = alar.ActivityLogAlert - } - if alar.Location != nil { - objectMap["location"] = alar.Location - } - if alar.Tags != nil { - objectMap["tags"] = alar.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for ActivityLogAlertResource struct. -func (alar *ActivityLogAlertResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var activityLogAlert ActivityLogAlert - err = json.Unmarshal(*v, &activityLogAlert) - if err != nil { - return err - } - alar.ActivityLogAlert = &activityLogAlert - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - alar.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - alar.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - alar.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - alar.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - alar.Tags = tags - } - } - } - - return nil -} - -// AlertingAction specify action need to be taken when rule type is Alert -type AlertingAction struct { - // Severity - Severity of the alert. Possible values include: 'Zero', 'One', 'Two', 'Three', 'Four' - Severity AlertSeverity `json:"severity,omitempty"` - // AznsAction - Azure action group reference. - AznsAction *AzNsActionGroup `json:"aznsAction,omitempty"` - // ThrottlingInMin - time (in minutes) for which Alerts should be throttled or suppressed. 
- ThrottlingInMin *int32 `json:"throttlingInMin,omitempty"` - // Trigger - The trigger condition that results in the alert rule being. - Trigger *TriggerCondition `json:"trigger,omitempty"` - // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction' - OdataType OdataTypeBasicAction `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for AlertingAction. -func (aa AlertingAction) MarshalJSON() ([]byte, error) { - aa.OdataType = OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction - objectMap := make(map[string]interface{}) - if aa.Severity != "" { - objectMap["severity"] = aa.Severity - } - if aa.AznsAction != nil { - objectMap["aznsAction"] = aa.AznsAction - } - if aa.ThrottlingInMin != nil { - objectMap["throttlingInMin"] = aa.ThrottlingInMin - } - if aa.Trigger != nil { - objectMap["trigger"] = aa.Trigger - } - if aa.OdataType != "" { - objectMap["odata.type"] = aa.OdataType - } - return json.Marshal(objectMap) -} - -// AsAlertingAction is the BasicAction implementation for AlertingAction. -func (aa AlertingAction) AsAlertingAction() (*AlertingAction, bool) { - return &aa, true -} - -// AsLogToMetricAction is the BasicAction implementation for AlertingAction. -func (aa AlertingAction) AsLogToMetricAction() (*LogToMetricAction, bool) { - return nil, false -} - -// AsAction is the BasicAction implementation for AlertingAction. -func (aa AlertingAction) AsAction() (*Action, bool) { - return nil, false -} - -// AsBasicAction is the BasicAction implementation for AlertingAction. -func (aa AlertingAction) AsBasicAction() (BasicAction, bool) { - return &aa, true -} - -// AlertRule an alert rule. -type AlertRule struct { - // Name - the name of the alert rule. - Name *string `json:"name,omitempty"` - // Description - the description of the alert rule that will be included in the alert email. - Description *string `json:"description,omitempty"` - // IsEnabled - the flag that indicates whether the alert rule is enabled. - IsEnabled *bool `json:"isEnabled,omitempty"` - // Condition - the condition that results in the alert rule being activated. - Condition BasicRuleCondition `json:"condition,omitempty"` - // Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved. - Actions *[]BasicRuleAction `json:"actions,omitempty"` - // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format. - LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for AlertRule. -func (ar AlertRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if ar.Name != nil { - objectMap["name"] = ar.Name - } - if ar.Description != nil { - objectMap["description"] = ar.Description - } - if ar.IsEnabled != nil { - objectMap["isEnabled"] = ar.IsEnabled - } - objectMap["condition"] = ar.Condition - if ar.Actions != nil { - objectMap["actions"] = ar.Actions - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AlertRule struct. 
-func (ar *AlertRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - ar.Name = &name - } - case "description": - if v != nil { - var description string - err = json.Unmarshal(*v, &description) - if err != nil { - return err - } - ar.Description = &description - } - case "isEnabled": - if v != nil { - var isEnabled bool - err = json.Unmarshal(*v, &isEnabled) - if err != nil { - return err - } - ar.IsEnabled = &isEnabled - } - case "condition": - if v != nil { - condition, err := unmarshalBasicRuleCondition(*v) - if err != nil { - return err - } - ar.Condition = condition - } - case "actions": - if v != nil { - actions, err := unmarshalBasicRuleActionArray(*v) - if err != nil { - return err - } - ar.Actions = &actions - } - case "lastUpdatedTime": - if v != nil { - var lastUpdatedTime date.Time - err = json.Unmarshal(*v, &lastUpdatedTime) - if err != nil { - return err - } - ar.LastUpdatedTime = &lastUpdatedTime - } - } - } - - return nil -} - -// AlertRuleResource the alert rule resource. -type AlertRuleResource struct { - autorest.Response `json:"-"` - // AlertRule - The alert rule properties of the resource. - *AlertRule `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for AlertRuleResource. -func (arr AlertRuleResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if arr.AlertRule != nil { - objectMap["properties"] = arr.AlertRule - } - if arr.Location != nil { - objectMap["location"] = arr.Location - } - if arr.Tags != nil { - objectMap["tags"] = arr.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AlertRuleResource struct. -func (arr *AlertRuleResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var alertRule AlertRule - err = json.Unmarshal(*v, &alertRule) - if err != nil { - return err - } - arr.AlertRule = &alertRule - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - arr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - arr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - arr.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - arr.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - arr.Tags = tags - } - } - } - - return nil -} - -// AlertRuleResourceCollection represents a collection of alert rule resources. 
-type AlertRuleResourceCollection struct { - autorest.Response `json:"-"` - // Value - the values for the alert rule resources. - Value *[]AlertRuleResource `json:"value,omitempty"` -} - -// AlertRuleResourcePatch the alert rule object for patch operations. -type AlertRuleResourcePatch struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // AlertRule - The properties of an alert rule. - *AlertRule `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AlertRuleResourcePatch. -func (arrp AlertRuleResourcePatch) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if arrp.Tags != nil { - objectMap["tags"] = arrp.Tags - } - if arrp.AlertRule != nil { - objectMap["properties"] = arrp.AlertRule - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AlertRuleResourcePatch struct. -func (arrp *AlertRuleResourcePatch) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - arrp.Tags = tags - } - case "properties": - if v != nil { - var alertRule AlertRule - err = json.Unmarshal(*v, &alertRule) - if err != nil { - return err - } - arrp.AlertRule = &alertRule - } - } - } - - return nil -} - -// AutomationRunbookReceiver the Azure Automation Runbook notification receiver. -type AutomationRunbookReceiver struct { - // AutomationAccountID - The Azure automation account Id which holds this runbook and authenticate to Azure resource. - AutomationAccountID *string `json:"automationAccountId,omitempty"` - // RunbookName - The name for this runbook. - RunbookName *string `json:"runbookName,omitempty"` - // WebhookResourceID - The resource id for webhook linked to this runbook. - WebhookResourceID *string `json:"webhookResourceId,omitempty"` - // IsGlobalRunbook - Indicates whether this instance is global runbook. - IsGlobalRunbook *bool `json:"isGlobalRunbook,omitempty"` - // Name - Indicates name of the webhook. - Name *string `json:"name,omitempty"` - // ServiceURI - The URI where webhooks should be sent. - ServiceURI *string `json:"serviceUri,omitempty"` -} - -// AutoscaleNotification autoscale notification. -type AutoscaleNotification struct { - // Operation - the operation associated with the notification and its value must be "scale" - Operation *string `json:"operation,omitempty"` - // Email - the email notification. - Email *EmailNotification `json:"email,omitempty"` - // Webhooks - the collection of webhook notifications. - Webhooks *[]WebhookNotification `json:"webhooks,omitempty"` -} - -// AutoscaleProfile autoscale profile. -type AutoscaleProfile struct { - // Name - the name of the profile. - Name *string `json:"name,omitempty"` - // Capacity - the number of instances that can be used during this profile. - Capacity *ScaleCapacity `json:"capacity,omitempty"` - // Rules - the collection of rules that provide the triggers and parameters for the scaling action. A maximum of 10 rules can be specified. - Rules *[]ScaleRule `json:"rules,omitempty"` - // FixedDate - the specific date-time for the profile. This element is not used if the Recurrence element is used. - FixedDate *TimeWindow `json:"fixedDate,omitempty"` - // Recurrence - the repeating times at which this profile begins. This element is not used if the FixedDate element is used. 
- Recurrence *Recurrence `json:"recurrence,omitempty"` -} - -// AutoscaleSetting a setting that contains all of the configuration for the automatic scaling of a -// resource. -type AutoscaleSetting struct { - // Profiles - the collection of automatic scaling profiles that specify different scaling parameters for different time periods. A maximum of 20 profiles can be specified. - Profiles *[]AutoscaleProfile `json:"profiles,omitempty"` - // Notifications - the collection of notifications. - Notifications *[]AutoscaleNotification `json:"notifications,omitempty"` - // Enabled - the enabled flag. Specifies whether automatic scaling is enabled for the resource. The default value is 'true'. - Enabled *bool `json:"enabled,omitempty"` - // Name - the name of the autoscale setting. - Name *string `json:"name,omitempty"` - // TargetResourceURI - the resource identifier of the resource that the autoscale setting should be added to. - TargetResourceURI *string `json:"targetResourceUri,omitempty"` -} - -// AutoscaleSettingResource the autoscale setting resource. -type AutoscaleSettingResource struct { - autorest.Response `json:"-"` - // AutoscaleSetting - The autoscale setting of the resource. - *AutoscaleSetting `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for AutoscaleSettingResource. -func (asr AutoscaleSettingResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if asr.AutoscaleSetting != nil { - objectMap["properties"] = asr.AutoscaleSetting - } - if asr.Location != nil { - objectMap["location"] = asr.Location - } - if asr.Tags != nil { - objectMap["tags"] = asr.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for AutoscaleSettingResource struct. -func (asr *AutoscaleSettingResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var autoscaleSetting AutoscaleSetting - err = json.Unmarshal(*v, &autoscaleSetting) - if err != nil { - return err - } - asr.AutoscaleSetting = &autoscaleSetting - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - asr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - asr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - asr.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - asr.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - asr.Tags = tags - } - } - } - - return nil -} - -// AutoscaleSettingResourceCollection represents a collection of autoscale setting resources. 
-type AutoscaleSettingResourceCollection struct { - autorest.Response `json:"-"` - // Value - the values for the autoscale setting resources. - Value *[]AutoscaleSettingResource `json:"value,omitempty"` - // NextLink - URL to get the next set of results. - NextLink *string `json:"nextLink,omitempty"` -} - -// AutoscaleSettingResourceCollectionIterator provides access to a complete listing of -// AutoscaleSettingResource values. -type AutoscaleSettingResourceCollectionIterator struct { - i int - page AutoscaleSettingResourceCollectionPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *AutoscaleSettingResourceCollectionIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *AutoscaleSettingResourceCollectionIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter AutoscaleSettingResourceCollectionIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter AutoscaleSettingResourceCollectionIterator) Response() AutoscaleSettingResourceCollection { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter AutoscaleSettingResourceCollectionIterator) Value() AutoscaleSettingResource { - if !iter.page.NotDone() { - return AutoscaleSettingResource{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the AutoscaleSettingResourceCollectionIterator type. -func NewAutoscaleSettingResourceCollectionIterator(page AutoscaleSettingResourceCollectionPage) AutoscaleSettingResourceCollectionIterator { - return AutoscaleSettingResourceCollectionIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (asrc AutoscaleSettingResourceCollection) IsEmpty() bool { - return asrc.Value == nil || len(*asrc.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (asrc AutoscaleSettingResourceCollection) hasNextLink() bool { - return asrc.NextLink != nil && len(*asrc.NextLink) != 0 -} - -// autoscaleSettingResourceCollectionPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
-func (asrc AutoscaleSettingResourceCollection) autoscaleSettingResourceCollectionPreparer(ctx context.Context) (*http.Request, error) { - if !asrc.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(asrc.NextLink))) -} - -// AutoscaleSettingResourceCollectionPage contains a page of AutoscaleSettingResource values. -type AutoscaleSettingResourceCollectionPage struct { - fn func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error) - asrc AutoscaleSettingResourceCollection -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *AutoscaleSettingResourceCollectionPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.asrc) - if err != nil { - return err - } - page.asrc = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *AutoscaleSettingResourceCollectionPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page AutoscaleSettingResourceCollectionPage) NotDone() bool { - return !page.asrc.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page AutoscaleSettingResourceCollectionPage) Response() AutoscaleSettingResourceCollection { - return page.asrc -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page AutoscaleSettingResourceCollectionPage) Values() []AutoscaleSettingResource { - if page.asrc.IsEmpty() { - return nil - } - return *page.asrc.Value -} - -// Creates a new instance of the AutoscaleSettingResourceCollectionPage type. -func NewAutoscaleSettingResourceCollectionPage(cur AutoscaleSettingResourceCollection, getNextPage func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)) AutoscaleSettingResourceCollectionPage { - return AutoscaleSettingResourceCollectionPage{ - fn: getNextPage, - asrc: cur, - } -} - -// AutoscaleSettingResourcePatch the autoscale setting object for patch operations. -type AutoscaleSettingResourcePatch struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // AutoscaleSetting - The autoscale setting properties of the update operation. - *AutoscaleSetting `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for AutoscaleSettingResourcePatch. 
-func (asrp AutoscaleSettingResourcePatch) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if asrp.Tags != nil {
- objectMap["tags"] = asrp.Tags
- }
- if asrp.AutoscaleSetting != nil {
- objectMap["properties"] = asrp.AutoscaleSetting
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for AutoscaleSettingResourcePatch struct.
-func (asrp *AutoscaleSettingResourcePatch) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "tags":
- if v != nil {
- var tags map[string]*string
- err = json.Unmarshal(*v, &tags)
- if err != nil {
- return err
- }
- asrp.Tags = tags
- }
- case "properties":
- if v != nil {
- var autoscaleSetting AutoscaleSetting
- err = json.Unmarshal(*v, &autoscaleSetting)
- if err != nil {
- return err
- }
- asrp.AutoscaleSetting = &autoscaleSetting
- }
- }
- }
-
- return nil
-}
-
-// AzNsActionGroup azure action group
-type AzNsActionGroup struct {
- // ActionGroup - Azure Action Group reference.
- ActionGroup *[]string `json:"actionGroup,omitempty"`
- // EmailSubject - Custom subject override for all email ids in Azure action group
- EmailSubject *string `json:"emailSubject,omitempty"`
- // CustomWebhookPayload - Custom payload to be sent for all webhook URI in Azure action group
- CustomWebhookPayload *string `json:"customWebhookPayload,omitempty"`
-}
-
-// AzureAppPushReceiver the Azure mobile App push notification receiver.
-type AzureAppPushReceiver struct {
- // Name - The name of the Azure mobile app push receiver. Names must be unique across all receivers within an action group.
- Name *string `json:"name,omitempty"`
- // EmailAddress - The email address registered for the Azure mobile app.
- EmailAddress *string `json:"emailAddress,omitempty"`
-}
-
-// AzureFunctionReceiver an azure function receiver.
-type AzureFunctionReceiver struct {
- // Name - The name of the azure function receiver. Names must be unique across all receivers within an action group.
- Name *string `json:"name,omitempty"`
- // FunctionAppResourceID - The azure resource id of the function app.
- FunctionAppResourceID *string `json:"functionAppResourceId,omitempty"`
- // FunctionName - The function name in the function app.
- FunctionName *string `json:"functionName,omitempty"`
- // HTTPTriggerURL - The http trigger url where http request sent to.
- HTTPTriggerURL *string `json:"httpTriggerUrl,omitempty"`
-}
-
-// Baseline the baseline values for a single sensitivity value.
-type Baseline struct {
- // Sensitivity - the sensitivity of the baseline. Possible values include: 'SensitivityLow', 'SensitivityMedium', 'SensitivityHigh'
- Sensitivity Sensitivity `json:"sensitivity,omitempty"`
- // LowThresholds - The low thresholds of the baseline.
- LowThresholds *[]float64 `json:"lowThresholds,omitempty"`
- // HighThresholds - The high thresholds of the baseline.
- HighThresholds *[]float64 `json:"highThresholds,omitempty"`
-}
-
-// BaselineMetadataValue represents a baseline metadata value.
-type BaselineMetadataValue struct {
- // Name - the name of the metadata.
- Name *LocalizableString `json:"name,omitempty"`
- // Value - the value of the metadata.
- Value *string `json:"value,omitempty"`
-}
-
-// BaselineProperties the baseline properties class.
-type BaselineProperties struct {
- // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
- Timespan *string `json:"timespan,omitempty"`
- // Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made.
- Interval *string `json:"interval,omitempty"`
- // Aggregation - The aggregation type of the metric.
- Aggregation *string `json:"aggregation,omitempty"`
- // Timestamps - the array of timestamps of the baselines.
- Timestamps *[]date.Time `json:"timestamps,omitempty"`
- // Baseline - the baseline values for each sensitivity.
- Baseline *[]Baseline `json:"baseline,omitempty"`
- // Metadata - the baseline metadata values.
- Metadata *[]BaselineMetadataValue `json:"metadata,omitempty"`
-}
-
-// BaselineResponse the response to a baseline query.
-type BaselineResponse struct {
- autorest.Response `json:"-"`
- // ID - READ-ONLY; the metric baseline Id.
- ID *string `json:"id,omitempty"`
- // Type - READ-ONLY; the resource type of the baseline resource.
- Type *string `json:"type,omitempty"`
- // Name - READ-ONLY; the name and the display name of the metric, i.e. it is localizable string.
- Name *LocalizableString `json:"name,omitempty"`
- // BaselineProperties - the properties of the baseline.
- *BaselineProperties `json:"properties,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for BaselineResponse.
-func (br BaselineResponse) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if br.BaselineProperties != nil {
- objectMap["properties"] = br.BaselineProperties
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for BaselineResponse struct.
-func (br *BaselineResponse) UnmarshalJSON(body []byte) error {
- var m map[string]*json.RawMessage
- err := json.Unmarshal(body, &m)
- if err != nil {
- return err
- }
- for k, v := range m {
- switch k {
- case "id":
- if v != nil {
- var ID string
- err = json.Unmarshal(*v, &ID)
- if err != nil {
- return err
- }
- br.ID = &ID
- }
- case "type":
- if v != nil {
- var typeVar string
- err = json.Unmarshal(*v, &typeVar)
- if err != nil {
- return err
- }
- br.Type = &typeVar
- }
- case "name":
- if v != nil {
- var name LocalizableString
- err = json.Unmarshal(*v, &name)
- if err != nil {
- return err
- }
- br.Name = &name
- }
- case "properties":
- if v != nil {
- var baselineProperties BaselineProperties
- err = json.Unmarshal(*v, &baselineProperties)
- if err != nil {
- return err
- }
- br.BaselineProperties = &baselineProperties
- }
- }
- }
-
- return nil
-}
-
-// CalculateBaselineResponse the response to a calculate baseline call.
-type CalculateBaselineResponse struct {
- autorest.Response `json:"-"`
- // Type - the resource type of the baseline resource.
- Type *string `json:"type,omitempty"`
- // Timestamps - the array of timestamps of the baselines.
- Timestamps *[]date.Time `json:"timestamps,omitempty"`
- // Baseline - the baseline values for each sensitivity.
- Baseline *[]Baseline `json:"baseline,omitempty"`
-}
-
-// Criteria specifies the criteria for converting log to metric.
-type Criteria struct {
- // MetricName - Name of the metric
- MetricName *string `json:"metricName,omitempty"`
- // Dimensions - List of Dimensions for creating metric
- Dimensions *[]Dimension `json:"dimensions,omitempty"`
-}
-
-// DiagnosticSettings the diagnostic settings.
-type DiagnosticSettings struct {
- // StorageAccountID - The resource ID of the storage account to which you would like to send Diagnostic Logs.
- StorageAccountID *string `json:"storageAccountId,omitempty"`
- // ServiceBusRuleID - The service bus rule Id of the diagnostic setting. This is here to maintain backwards compatibility.
- ServiceBusRuleID *string `json:"serviceBusRuleId,omitempty"`
- // EventHubAuthorizationRuleID - The resource Id for the event hub authorization rule.
- EventHubAuthorizationRuleID *string `json:"eventHubAuthorizationRuleId,omitempty"`
- // EventHubName - The name of the event hub. If none is specified, the default event hub will be selected.
- EventHubName *string `json:"eventHubName,omitempty"`
- // Metrics - The list of metric settings.
- Metrics *[]MetricSettings `json:"metrics,omitempty"`
- // Logs - The list of logs settings.
- Logs *[]LogSettings `json:"logs,omitempty"`
- // WorkspaceID - The full ARM resource ID of the Log Analytics workspace to which you would like to send Diagnostic Logs. Example: /subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2
- WorkspaceID *string `json:"workspaceId,omitempty"`
- // LogAnalyticsDestinationType - A string indicating whether the export to Log Analytics should use the default destination type, i.e. AzureDiagnostics, or use a destination type constructed as follows: <normalized service identity>_<normalized category name>. Possible values are: Dedicated and null (null is default.)
- LogAnalyticsDestinationType *string `json:"logAnalyticsDestinationType,omitempty"`
-}
-
-// DiagnosticSettingsCategory the diagnostic settings Category.
-type DiagnosticSettingsCategory struct {
- // CategoryType - The type of the diagnostic settings category. Possible values include: 'Metrics', 'Logs'
- CategoryType CategoryType `json:"categoryType,omitempty"`
-}
-
-// DiagnosticSettingsCategoryResource the diagnostic settings category resource.
-type DiagnosticSettingsCategoryResource struct {
- autorest.Response `json:"-"`
- // DiagnosticSettingsCategory - The properties of a Diagnostic Settings Category.
- *DiagnosticSettingsCategory `json:"properties,omitempty"`
- // ID - READ-ONLY; Azure resource Id
- ID *string `json:"id,omitempty"`
- // Name - READ-ONLY; Azure resource name
- Name *string `json:"name,omitempty"`
- // Type - READ-ONLY; Azure resource type
- Type *string `json:"type,omitempty"`
-}
-
-// MarshalJSON is the custom marshaler for DiagnosticSettingsCategoryResource.
-func (dscr DiagnosticSettingsCategoryResource) MarshalJSON() ([]byte, error) {
- objectMap := make(map[string]interface{})
- if dscr.DiagnosticSettingsCategory != nil {
- objectMap["properties"] = dscr.DiagnosticSettingsCategory
- }
- return json.Marshal(objectMap)
-}
-
-// UnmarshalJSON is the custom unmarshaler for DiagnosticSettingsCategoryResource struct.
-func (dscr *DiagnosticSettingsCategoryResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var diagnosticSettingsCategory DiagnosticSettingsCategory - err = json.Unmarshal(*v, &diagnosticSettingsCategory) - if err != nil { - return err - } - dscr.DiagnosticSettingsCategory = &diagnosticSettingsCategory - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dscr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dscr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dscr.Type = &typeVar - } - } - } - - return nil -} - -// DiagnosticSettingsCategoryResourceCollection represents a collection of diagnostic setting category -// resources. -type DiagnosticSettingsCategoryResourceCollection struct { - autorest.Response `json:"-"` - // Value - The collection of diagnostic settings category resources. - Value *[]DiagnosticSettingsCategoryResource `json:"value,omitempty"` -} - -// DiagnosticSettingsResource the diagnostic setting resource. -type DiagnosticSettingsResource struct { - autorest.Response `json:"-"` - // DiagnosticSettings - Properties of a Diagnostic Settings Resource. - *DiagnosticSettings `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for DiagnosticSettingsResource. -func (dsr DiagnosticSettingsResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if dsr.DiagnosticSettings != nil { - objectMap["properties"] = dsr.DiagnosticSettings - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for DiagnosticSettingsResource struct. -func (dsr *DiagnosticSettingsResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var diagnosticSettings DiagnosticSettings - err = json.Unmarshal(*v, &diagnosticSettings) - if err != nil { - return err - } - dsr.DiagnosticSettings = &diagnosticSettings - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - dsr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dsr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - dsr.Type = &typeVar - } - } - } - - return nil -} - -// DiagnosticSettingsResourceCollection represents a collection of alert rule resources. -type DiagnosticSettingsResourceCollection struct { - autorest.Response `json:"-"` - // Value - The collection of diagnostic settings resources;. - Value *[]DiagnosticSettingsResource `json:"value,omitempty"` -} - -// Dimension specifies the criteria for converting log to metric. 
-type Dimension struct { - // Name - Name of the dimension - Name *string `json:"name,omitempty"` - // Operator - Operator for dimension values - Operator *string `json:"operator,omitempty"` - // Values - List of dimension values - Values *[]string `json:"values,omitempty"` -} - -// DynamicMetricCriteria criterion for dynamic threshold. -type DynamicMetricCriteria struct { - // Operator - The operator used to compare the metric value against the threshold. Possible values include: 'DynamicThresholdOperatorGreaterThan', 'DynamicThresholdOperatorLessThan', 'DynamicThresholdOperatorGreaterOrLessThan' - Operator DynamicThresholdOperator `json:"operator,omitempty"` - // AlertSensitivity - The extent of deviation required to trigger an alert. This will affect how tight the threshold is to the metric series pattern. Possible values include: 'Low', 'Medium', 'High' - AlertSensitivity DynamicThresholdSensitivity `json:"alertSensitivity,omitempty"` - // FailingPeriods - The minimum number of violations required within the selected lookback time window required to raise an alert. - FailingPeriods *DynamicThresholdFailingPeriods `json:"failingPeriods,omitempty"` - // IgnoreDataBefore - Use this option to set the date from which to start learning the metric historical data and calculate the dynamic thresholds (in ISO8601 format) - IgnoreDataBefore *date.Time `json:"ignoreDataBefore,omitempty"` - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // Name - Name of the criteria. - Name *string `json:"name,omitempty"` - // MetricName - Name of the metric. - MetricName *string `json:"metricName,omitempty"` - // MetricNamespace - Namespace of the metric. - MetricNamespace *string `json:"metricNamespace,omitempty"` - // TimeAggregation - the criteria time aggregation types. - TimeAggregation interface{} `json:"timeAggregation,omitempty"` - // Dimensions - List of dimension conditions. - Dimensions *[]MetricDimension `json:"dimensions,omitempty"` - // SkipMetricValidation - Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped. - SkipMetricValidation *bool `json:"skipMetricValidation,omitempty"` - // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' - CriterionType CriterionType `json:"criterionType,omitempty"` -} - -// MarshalJSON is the custom marshaler for DynamicMetricCriteria. 
-func (dmc DynamicMetricCriteria) MarshalJSON() ([]byte, error) { - dmc.CriterionType = CriterionTypeDynamicThresholdCriterion - objectMap := make(map[string]interface{}) - if dmc.Operator != "" { - objectMap["operator"] = dmc.Operator - } - if dmc.AlertSensitivity != "" { - objectMap["alertSensitivity"] = dmc.AlertSensitivity - } - if dmc.FailingPeriods != nil { - objectMap["failingPeriods"] = dmc.FailingPeriods - } - if dmc.IgnoreDataBefore != nil { - objectMap["ignoreDataBefore"] = dmc.IgnoreDataBefore - } - if dmc.Name != nil { - objectMap["name"] = dmc.Name - } - if dmc.MetricName != nil { - objectMap["metricName"] = dmc.MetricName - } - if dmc.MetricNamespace != nil { - objectMap["metricNamespace"] = dmc.MetricNamespace - } - if dmc.TimeAggregation != nil { - objectMap["timeAggregation"] = dmc.TimeAggregation - } - if dmc.Dimensions != nil { - objectMap["dimensions"] = dmc.Dimensions - } - if dmc.SkipMetricValidation != nil { - objectMap["skipMetricValidation"] = dmc.SkipMetricValidation - } - if dmc.CriterionType != "" { - objectMap["criterionType"] = dmc.CriterionType - } - for k, v := range dmc.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. -func (dmc DynamicMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { - return nil, false -} - -// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. -func (dmc DynamicMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { - return &dmc, true -} - -// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. -func (dmc DynamicMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { - return nil, false -} - -// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. -func (dmc DynamicMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { - return &dmc, true -} - -// UnmarshalJSON is the custom unmarshaler for DynamicMetricCriteria struct. 
-func (dmc *DynamicMetricCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "operator": - if v != nil { - var operator DynamicThresholdOperator - err = json.Unmarshal(*v, &operator) - if err != nil { - return err - } - dmc.Operator = operator - } - case "alertSensitivity": - if v != nil { - var alertSensitivity DynamicThresholdSensitivity - err = json.Unmarshal(*v, &alertSensitivity) - if err != nil { - return err - } - dmc.AlertSensitivity = alertSensitivity - } - case "failingPeriods": - if v != nil { - var failingPeriods DynamicThresholdFailingPeriods - err = json.Unmarshal(*v, &failingPeriods) - if err != nil { - return err - } - dmc.FailingPeriods = &failingPeriods - } - case "ignoreDataBefore": - if v != nil { - var ignoreDataBefore date.Time - err = json.Unmarshal(*v, &ignoreDataBefore) - if err != nil { - return err - } - dmc.IgnoreDataBefore = &ignoreDataBefore - } - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if dmc.AdditionalProperties == nil { - dmc.AdditionalProperties = make(map[string]interface{}) - } - dmc.AdditionalProperties[k] = additionalProperties - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - dmc.Name = &name - } - case "metricName": - if v != nil { - var metricName string - err = json.Unmarshal(*v, &metricName) - if err != nil { - return err - } - dmc.MetricName = &metricName - } - case "metricNamespace": - if v != nil { - var metricNamespace string - err = json.Unmarshal(*v, &metricNamespace) - if err != nil { - return err - } - dmc.MetricNamespace = &metricNamespace - } - case "timeAggregation": - if v != nil { - var timeAggregation interface{} - err = json.Unmarshal(*v, &timeAggregation) - if err != nil { - return err - } - dmc.TimeAggregation = timeAggregation - } - case "dimensions": - if v != nil { - var dimensions []MetricDimension - err = json.Unmarshal(*v, &dimensions) - if err != nil { - return err - } - dmc.Dimensions = &dimensions - } - case "skipMetricValidation": - if v != nil { - var skipMetricValidation bool - err = json.Unmarshal(*v, &skipMetricValidation) - if err != nil { - return err - } - dmc.SkipMetricValidation = &skipMetricValidation - } - case "criterionType": - if v != nil { - var criterionType CriterionType - err = json.Unmarshal(*v, &criterionType) - if err != nil { - return err - } - dmc.CriterionType = criterionType - } - } - } - - return nil -} - -// DynamicThresholdFailingPeriods the minimum number of violations required within the selected lookback -// time window required to raise an alert. -type DynamicThresholdFailingPeriods struct { - // NumberOfEvaluationPeriods - The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (windowSize) and the selected number of aggregated points. - NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods,omitempty"` - // MinFailingPeriodsToAlert - The number of violations to trigger an alert. Should be smaller or equal to numberOfEvaluationPeriods. - MinFailingPeriodsToAlert *float64 `json:"minFailingPeriodsToAlert,omitempty"` -} - -// EmailNotification email notification of an autoscale event. 
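[Editor's note] DynamicMetricCriteria above adds a catch-all on top of that pattern: the default case of the key switch stores unmatched properties in AdditionalProperties, and MarshalJSON copies them back into the object map, so fields the client does not model still round-trip. A small sketch of the same idea, again with illustrative names:

package main

import (
	"encoding/json"
	"fmt"
)

type criteria struct {
	Name       *string
	Additional map[string]interface{}
}

func (c *criteria) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(body, &m); err != nil {
		return err
	}
	for k, v := range m {
		if v == nil {
			continue
		}
		switch k {
		case "name":
			var name string
			if err := json.Unmarshal(*v, &name); err != nil {
				return err
			}
			c.Name = &name
		default: // collect unmatched keys, like AdditionalProperties above
			var val interface{}
			if err := json.Unmarshal(*v, &val); err != nil {
				return err
			}
			if c.Additional == nil {
				c.Additional = make(map[string]interface{})
			}
			c.Additional[k] = val
		}
	}
	return nil
}

// MarshalJSON writes the known field, then replays the collected extras.
func (c criteria) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if c.Name != nil {
		objectMap["name"] = c.Name
	}
	for k, v := range c.Additional {
		objectMap[k] = v
	}
	return json.Marshal(objectMap)
}

func main() {
	var c criteria
	_ = c.UnmarshalJSON([]byte(`{"name":"cpu","extra":1}`))
	out, _ := c.MarshalJSON()
	fmt.Println(string(out)) // {"extra":1,"name":"cpu"}
}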
-type EmailNotification struct { - // SendToSubscriptionAdministrator - a value indicating whether to send email to subscription administrator. - SendToSubscriptionAdministrator *bool `json:"sendToSubscriptionAdministrator,omitempty"` - // SendToSubscriptionCoAdministrators - a value indicating whether to send email to subscription co-administrators. - SendToSubscriptionCoAdministrators *bool `json:"sendToSubscriptionCoAdministrators,omitempty"` - // CustomEmails - the custom e-mails list. This value can be null or empty, in which case this attribute will be ignored. - CustomEmails *[]string `json:"customEmails,omitempty"` -} - -// EmailReceiver an email receiver. -type EmailReceiver struct { - // Name - The name of the email receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // EmailAddress - The email address of this receiver. - EmailAddress *string `json:"emailAddress,omitempty"` - // Status - READ-ONLY; The receiver status of the e-mail. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled' - Status ReceiverStatus `json:"status,omitempty"` -} - -// MarshalJSON is the custom marshaler for EmailReceiver. -func (er EmailReceiver) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if er.Name != nil { - objectMap["name"] = er.Name - } - if er.EmailAddress != nil { - objectMap["emailAddress"] = er.EmailAddress - } - return json.Marshal(objectMap) -} - -// EnableRequest describes a receiver that should be resubscribed. -type EnableRequest struct { - // ReceiverName - The name of the receiver to resubscribe. - ReceiverName *string `json:"receiverName,omitempty"` -} - -// ErrorResponse describes the format of Error response. -type ErrorResponse struct { - // Code - Error code - Code *string `json:"code,omitempty"` - // Message - Error message indicating why the operation failed. - Message *string `json:"message,omitempty"` -} - -// EventCategoryCollection a collection of event categories. Currently possible values are: Administrative, -// Security, ServiceHealth, Alert, Recommendation, Policy. -type EventCategoryCollection struct { - autorest.Response `json:"-"` - // Value - the list that includes the Azure event categories. - Value *[]LocalizableString `json:"value,omitempty"` -} - -// EventData the Azure event log entries are of type EventData -type EventData struct { - // Authorization - READ-ONLY; The sender authorization information. - Authorization *SenderAuthorization `json:"authorization,omitempty"` - // Claims - READ-ONLY; key value pairs to identify ARM permissions. - Claims map[string]*string `json:"claims"` - // Caller - READ-ONLY; the email address of the user who has performed the operation, the UPN claim or SPN claim based on availability. - Caller *string `json:"caller,omitempty"` - // Description - READ-ONLY; the description of the event. - Description *string `json:"description,omitempty"` - // ID - READ-ONLY; the Id of this event as required by ARM for RBAC. It contains the EventDataID and a timestamp information. - ID *string `json:"id,omitempty"` - // EventDataID - READ-ONLY; the event data Id. This is a unique identifier for an event. - EventDataID *string `json:"eventDataId,omitempty"` - // CorrelationID - READ-ONLY; the correlation Id, usually a GUID in the string format. The correlation Id is shared among the events that belong to the same uber operation. 
- CorrelationID *string `json:"correlationId,omitempty"` - // EventName - READ-ONLY; the event name. This value should not be confused with OperationName. For practical purposes, OperationName might be more appealing to end users. - EventName *LocalizableString `json:"eventName,omitempty"` - // Category - READ-ONLY; the event category. - Category *LocalizableString `json:"category,omitempty"` - // HTTPRequest - READ-ONLY; the HTTP request info. Usually includes the 'clientRequestId', 'clientIpAddress' (IP address of the user who initiated the event) and 'method' (HTTP method e.g. PUT). - HTTPRequest *HTTPRequestInfo `json:"httpRequest,omitempty"` - // Level - READ-ONLY; the event level. Possible values include: 'Critical', 'Error', 'Warning', 'Informational', 'Verbose' - Level EventLevel `json:"level,omitempty"` - // ResourceGroupName - READ-ONLY; the resource group name of the impacted resource. - ResourceGroupName *string `json:"resourceGroupName,omitempty"` - // ResourceProviderName - READ-ONLY; the resource provider name of the impacted resource. - ResourceProviderName *LocalizableString `json:"resourceProviderName,omitempty"` - // ResourceID - READ-ONLY; the resource uri that uniquely identifies the resource that caused this event. - ResourceID *string `json:"resourceId,omitempty"` - // ResourceType - READ-ONLY; the resource type - ResourceType *LocalizableString `json:"resourceType,omitempty"` - // OperationID - READ-ONLY; It is usually a GUID shared among the events corresponding to single operation. This value should not be confused with EventName. - OperationID *string `json:"operationId,omitempty"` - // OperationName - READ-ONLY; the operation name. - OperationName *LocalizableString `json:"operationName,omitempty"` - // Properties - READ-ONLY; the set of pairs (usually a Dictionary) that includes details about the event. - Properties map[string]*string `json:"properties"` - // Status - READ-ONLY; a string describing the status of the operation. Some typical values are: Started, In progress, Succeeded, Failed, Resolved. - Status *LocalizableString `json:"status,omitempty"` - // SubStatus - READ-ONLY; the event sub status. Most of the time, when included, this captures the HTTP status code of the REST call. Common values are: OK (HTTP Status Code: 200), Created (HTTP Status Code: 201), Accepted (HTTP Status Code: 202), No Content (HTTP Status Code: 204), Bad Request(HTTP Status Code: 400), Not Found (HTTP Status Code: 404), Conflict (HTTP Status Code: 409), Internal Server Error (HTTP Status Code: 500), Service Unavailable (HTTP Status Code:503), Gateway Timeout (HTTP Status Code: 504) - SubStatus *LocalizableString `json:"subStatus,omitempty"` - // EventTimestamp - READ-ONLY; the timestamp of when the event was generated by the Azure service processing the request corresponding the event. It in ISO 8601 format. - EventTimestamp *date.Time `json:"eventTimestamp,omitempty"` - // SubmissionTimestamp - READ-ONLY; the timestamp of when the event became available for querying via this API. It is in ISO 8601 format. This value should not be confused eventTimestamp. As there might be a delay between the occurrence time of the event, and the time that the event is submitted to the Azure logging infrastructure. - SubmissionTimestamp *date.Time `json:"submissionTimestamp,omitempty"` - // SubscriptionID - READ-ONLY; the Azure subscription Id usually a GUID. 
- SubscriptionID *string `json:"subscriptionId,omitempty"` - // TenantID - READ-ONLY; the Azure tenant Id - TenantID *string `json:"tenantId,omitempty"` -} - -// MarshalJSON is the custom marshaler for EventData. -func (ed EventData) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// EventDataCollection represents collection of events. -type EventDataCollection struct { - autorest.Response `json:"-"` - // Value - this list that includes the Azure audit logs. - Value *[]EventData `json:"value,omitempty"` - // NextLink - Provides the link to retrieve the next set of events. - NextLink *string `json:"nextLink,omitempty"` -} - -// EventDataCollectionIterator provides access to a complete listing of EventData values. -type EventDataCollectionIterator struct { - i int - page EventDataCollectionPage -} - -// NextWithContext advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -func (iter *EventDataCollectionIterator) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionIterator.NextWithContext") - defer func() { - sc := -1 - if iter.Response().Response.Response != nil { - sc = iter.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - iter.i++ - if iter.i < len(iter.page.Values()) { - return nil - } - err = iter.page.NextWithContext(ctx) - if err != nil { - iter.i-- - return err - } - iter.i = 0 - return nil -} - -// Next advances to the next value. If there was an error making -// the request the iterator does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (iter *EventDataCollectionIterator) Next() error { - return iter.NextWithContext(context.Background()) -} - -// NotDone returns true if the enumeration should be started or is not yet complete. -func (iter EventDataCollectionIterator) NotDone() bool { - return iter.page.NotDone() && iter.i < len(iter.page.Values()) -} - -// Response returns the raw server response from the last page request. -func (iter EventDataCollectionIterator) Response() EventDataCollection { - return iter.page.Response() -} - -// Value returns the current value or a zero-initialized value if the -// iterator has advanced beyond the end of the collection. -func (iter EventDataCollectionIterator) Value() EventData { - if !iter.page.NotDone() { - return EventData{} - } - return iter.page.Values()[iter.i] -} - -// Creates a new instance of the EventDataCollectionIterator type. -func NewEventDataCollectionIterator(page EventDataCollectionPage) EventDataCollectionIterator { - return EventDataCollectionIterator{page: page} -} - -// IsEmpty returns true if the ListResult contains no values. -func (edc EventDataCollection) IsEmpty() bool { - return edc.Value == nil || len(*edc.Value) == 0 -} - -// hasNextLink returns true if the NextLink is not empty. -func (edc EventDataCollection) hasNextLink() bool { - return edc.NextLink != nil && len(*edc.NextLink) != 0 -} - -// eventDataCollectionPreparer prepares a request to retrieve the next set of results. -// It returns nil if no more results exist. 
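[Editor's note] EventDataCollectionIterator/EventDataCollectionPage, deleted below, are the stock autorest pager: a page holds the current collection plus a next-page function keyed off nextLink, and the iterator walks the values within a page before advancing. A toy sketch of that control flow; the fetch function and types here are invented for illustration, not the SDK's API:

package main

import (
	"context"
	"fmt"
)

// page stands in for EventDataCollection: a slice of values plus a nextLink.
type page struct {
	values   []int
	nextLink string
}

// fetch stands in for the generated next-results call; an unknown link
// means no further page.
func fetch(_ context.Context, link string) (page, bool) {
	pages := map[string]page{
		"start": {values: []int{1, 2}, nextLink: "p2"},
		"p2":    {values: []int{3}, nextLink: ""},
	}
	p, ok := pages[link]
	return p, ok
}

func main() {
	ctx := context.Background()
	for link := "start"; ; {
		p, ok := fetch(ctx, link)
		if !ok {
			break
		}
		for _, v := range p.values { // iterator: exhaust values in the page first
			fmt.Println(v)
		}
		if p.nextLink == "" { // pager: then advance via nextLink
			break
		}
		link = p.nextLink
	}
}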
-func (edc EventDataCollection) eventDataCollectionPreparer(ctx context.Context) (*http.Request, error) { - if !edc.hasNextLink() { - return nil, nil - } - return autorest.Prepare((&http.Request{}).WithContext(ctx), - autorest.AsJSON(), - autorest.AsGet(), - autorest.WithBaseURL(to.String(edc.NextLink))) -} - -// EventDataCollectionPage contains a page of EventData values. -type EventDataCollectionPage struct { - fn func(context.Context, EventDataCollection) (EventDataCollection, error) - edc EventDataCollection -} - -// NextWithContext advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -func (page *EventDataCollectionPage) NextWithContext(ctx context.Context) (err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionPage.NextWithContext") - defer func() { - sc := -1 - if page.Response().Response.Response != nil { - sc = page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - for { - next, err := page.fn(ctx, page.edc) - if err != nil { - return err - } - page.edc = next - if !next.hasNextLink() || !next.IsEmpty() { - break - } - } - return nil -} - -// Next advances to the next page of values. If there was an error making -// the request the page does not advance and the error is returned. -// Deprecated: Use NextWithContext() instead. -func (page *EventDataCollectionPage) Next() error { - return page.NextWithContext(context.Background()) -} - -// NotDone returns true if the page enumeration should be started or is not yet complete. -func (page EventDataCollectionPage) NotDone() bool { - return !page.edc.IsEmpty() -} - -// Response returns the raw server response from the last page request. -func (page EventDataCollectionPage) Response() EventDataCollection { - return page.edc -} - -// Values returns the slice of values for the current page or nil if there are no values. -func (page EventDataCollectionPage) Values() []EventData { - if page.edc.IsEmpty() { - return nil - } - return *page.edc.Value -} - -// Creates a new instance of the EventDataCollectionPage type. -func NewEventDataCollectionPage(cur EventDataCollection, getNextPage func(context.Context, EventDataCollection) (EventDataCollection, error)) EventDataCollectionPage { - return EventDataCollectionPage{ - fn: getNextPage, - edc: cur, - } -} - -// HTTPRequestInfo the Http request info. -type HTTPRequestInfo struct { - // ClientRequestID - the client request id. - ClientRequestID *string `json:"clientRequestId,omitempty"` - // ClientIPAddress - the client Ip Address - ClientIPAddress *string `json:"clientIpAddress,omitempty"` - // Method - the Http request method. - Method *string `json:"method,omitempty"` - // URI - the Uri. - URI *string `json:"uri,omitempty"` -} - -// Incident an alert incident indicates the activation status of an alert rule. -type Incident struct { - autorest.Response `json:"-"` - // Name - READ-ONLY; Incident name. - Name *string `json:"name,omitempty"` - // RuleName - READ-ONLY; Rule name that is associated with the incident. - RuleName *string `json:"ruleName,omitempty"` - // IsActive - READ-ONLY; A boolean to indicate whether the incident is active or resolved. - IsActive *bool `json:"isActive,omitempty"` - // ActivatedTime - READ-ONLY; The time at which the incident was activated in ISO8601 format. 
- ActivatedTime *date.Time `json:"activatedTime,omitempty"` - // ResolvedTime - READ-ONLY; The time at which the incident was resolved in ISO8601 format. If null, it means the incident is still active. - ResolvedTime *date.Time `json:"resolvedTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for Incident. -func (i Incident) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// IncidentListResult the List incidents operation response. -type IncidentListResult struct { - autorest.Response `json:"-"` - // Value - the incident collection. - Value *[]Incident `json:"value,omitempty"` -} - -// ItsmReceiver an Itsm receiver. -type ItsmReceiver struct { - // Name - The name of the Itsm receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // WorkspaceID - OMS LA instance identifier. - WorkspaceID *string `json:"workspaceId,omitempty"` - // ConnectionID - Unique identification of ITSM connection among multiple defined in above workspace. - ConnectionID *string `json:"connectionId,omitempty"` - // TicketConfiguration - JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well. - TicketConfiguration *string `json:"ticketConfiguration,omitempty"` - // Region - Region in which workspace resides. Supported values:'centralindia','japaneast','southeastasia','australiasoutheast','uksouth','westcentralus','canadacentral','eastus','westeurope' - Region *string `json:"region,omitempty"` -} - -// LocalizableString the localizable string class. -type LocalizableString struct { - // Value - the invariant value. - Value *string `json:"value,omitempty"` - // LocalizedValue - the locale specific value. - LocalizedValue *string `json:"localizedValue,omitempty"` -} - -// LocationThresholdRuleCondition a rule condition based on a certain number of locations failing. -type LocationThresholdRuleCondition struct { - // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day. - WindowSize *string `json:"windowSize,omitempty"` - // FailedLocationCount - the number of locations that must fail to activate the alert. - FailedLocationCount *int32 `json:"failedLocationCount,omitempty"` - // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource. - DataSource BasicRuleDataSource `json:"dataSource,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition' - OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for LocationThresholdRuleCondition. 
-func (ltrc LocationThresholdRuleCondition) MarshalJSON() ([]byte, error) { - ltrc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition - objectMap := make(map[string]interface{}) - if ltrc.WindowSize != nil { - objectMap["windowSize"] = ltrc.WindowSize - } - if ltrc.FailedLocationCount != nil { - objectMap["failedLocationCount"] = ltrc.FailedLocationCount - } - objectMap["dataSource"] = ltrc.DataSource - if ltrc.OdataType != "" { - objectMap["odata.type"] = ltrc.OdataType - } - return json.Marshal(objectMap) -} - -// AsThresholdRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition. -func (ltrc LocationThresholdRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) { - return nil, false -} - -// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition. -func (ltrc LocationThresholdRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) { - return &ltrc, true -} - -// AsManagementEventRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition. -func (ltrc LocationThresholdRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) { - return nil, false -} - -// AsRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition. -func (ltrc LocationThresholdRuleCondition) AsRuleCondition() (*RuleCondition, bool) { - return nil, false -} - -// AsBasicRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition. -func (ltrc LocationThresholdRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { - return &ltrc, true -} - -// UnmarshalJSON is the custom unmarshaler for LocationThresholdRuleCondition struct. -func (ltrc *LocationThresholdRuleCondition) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "windowSize": - if v != nil { - var windowSize string - err = json.Unmarshal(*v, &windowSize) - if err != nil { - return err - } - ltrc.WindowSize = &windowSize - } - case "failedLocationCount": - if v != nil { - var failedLocationCount int32 - err = json.Unmarshal(*v, &failedLocationCount) - if err != nil { - return err - } - ltrc.FailedLocationCount = &failedLocationCount - } - case "dataSource": - if v != nil { - dataSource, err := unmarshalBasicRuleDataSource(*v) - if err != nil { - return err - } - ltrc.DataSource = dataSource - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicRuleCondition - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - ltrc.OdataType = odataType - } - } - } - - return nil -} - -// LogicAppReceiver a logic app receiver. -type LogicAppReceiver struct { - // Name - The name of the logic app receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // ResourceID - The azure resource id of the logic app receiver. - ResourceID *string `json:"resourceId,omitempty"` - // CallbackURL - The callback url where http request sent to. - CallbackURL *string `json:"callbackUrl,omitempty"` -} - -// LogMetricTrigger a log metrics trigger descriptor. -type LogMetricTrigger struct { - // ThresholdOperator - Evaluation operation for Metric -'GreaterThan' or 'LessThan' or 'Equal'.
Possible values include: 'ConditionalOperatorGreaterThan', 'ConditionalOperatorLessThan', 'ConditionalOperatorEqual' - ThresholdOperator ConditionalOperator `json:"thresholdOperator,omitempty"` - // Threshold - The threshold of the metric trigger. - Threshold *float64 `json:"threshold,omitempty"` - // MetricTriggerType - Metric Trigger Type - 'Consecutive' or 'Total'. Possible values include: 'MetricTriggerTypeConsecutive', 'MetricTriggerTypeTotal' - MetricTriggerType MetricTriggerType `json:"metricTriggerType,omitempty"` - // MetricColumn - Evaluation of metric on a particular column - MetricColumn *string `json:"metricColumn,omitempty"` -} - -// LogProfileCollection represents a collection of log profiles. -type LogProfileCollection struct { - autorest.Response `json:"-"` - // Value - the values of the log profiles. - Value *[]LogProfileResource `json:"value,omitempty"` -} - -// LogProfileProperties the log profile properties. -type LogProfileProperties struct { - // StorageAccountID - the resource id of the storage account to which you would like to send the Activity Log. - StorageAccountID *string `json:"storageAccountId,omitempty"` - // ServiceBusRuleID - The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming the Activity Log. The rule ID is of the format: '{service bus resource ID}/authorizationrules/{key name}'. - ServiceBusRuleID *string `json:"serviceBusRuleId,omitempty"` - // Locations - List of regions for which Activity Log events should be stored or streamed. It is a comma separated list of valid ARM locations including the 'global' location. - Locations *[]string `json:"locations,omitempty"` - // Categories - the categories of the logs. These categories are created as is convenient to the user. Some values are: 'Write', 'Delete', and/or 'Action.' - Categories *[]string `json:"categories,omitempty"` - // RetentionPolicy - the retention policy for the events in the log. - RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` -} - -// LogProfileResource the log profile resource. -type LogProfileResource struct { - autorest.Response `json:"-"` - // LogProfileProperties - The log profile properties of the resource. - *LogProfileProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for LogProfileResource. -func (lpr LogProfileResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lpr.LogProfileProperties != nil { - objectMap["properties"] = lpr.LogProfileProperties - } - if lpr.Location != nil { - objectMap["location"] = lpr.Location - } - if lpr.Tags != nil { - objectMap["tags"] = lpr.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for LogProfileResource struct. 
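[Editor's note] LogProfileResource above embeds *LogProfileProperties under a `json:"properties,omitempty"` tag, and its custom marshaler exists to re-nest those fields under "properties" explicitly rather than rely on default embedding behavior. A minimal sketch of that shape, with illustrative names:

package main

import (
	"encoding/json"
	"fmt"
)

type props struct {
	StorageAccountID *string `json:"storageAccountId,omitempty"`
}

// logProfile embeds the properties struct, like LogProfileResource does.
type logProfile struct {
	*props   `json:"properties,omitempty"`
	Location *string `json:"location,omitempty"`
}

// MarshalJSON hoists the embedded struct under the "properties" key.
func (lp logProfile) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if lp.props != nil {
		objectMap["properties"] = lp.props
	}
	if lp.Location != nil {
		objectMap["location"] = lp.Location
	}
	return json.Marshal(objectMap)
}

func main() {
	id, loc := "/storage/1", "westeurope"
	b, _ := json.Marshal(logProfile{props: &props{StorageAccountID: &id}, Location: &loc})
	fmt.Println(string(b)) // {"location":"westeurope","properties":{"storageAccountId":"/storage/1"}}
}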
-func (lpr *LogProfileResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var logProfileProperties LogProfileProperties - err = json.Unmarshal(*v, &logProfileProperties) - if err != nil { - return err - } - lpr.LogProfileProperties = &logProfileProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - lpr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - lpr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - lpr.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - lpr.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - lpr.Tags = tags - } - } - } - - return nil -} - -// LogProfileResourcePatch the log profile resource for patch operations. -type LogProfileResourcePatch struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // LogProfileProperties - The log profile properties for an update operation. - *LogProfileProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for LogProfileResourcePatch. -func (lprp LogProfileResourcePatch) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lprp.Tags != nil { - objectMap["tags"] = lprp.Tags - } - if lprp.LogProfileProperties != nil { - objectMap["properties"] = lprp.LogProfileProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for LogProfileResourcePatch struct. -func (lprp *LogProfileResourcePatch) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - lprp.Tags = tags - } - case "properties": - if v != nil { - var logProfileProperties LogProfileProperties - err = json.Unmarshal(*v, &logProfileProperties) - if err != nil { - return err - } - lprp.LogProfileProperties = &logProfileProperties - } - } - } - - return nil -} - -// LogSearchRule log Search Rule Definition -type LogSearchRule struct { - // Description - The description of the Log Search rule. - Description *string `json:"description,omitempty"` - // Enabled - The flag which indicates whether the Log Search rule is enabled. Value should be true or false. Possible values include: 'True', 'False' - Enabled Enabled `json:"enabled,omitempty"` - // LastUpdatedTime - READ-ONLY; Last time the rule was updated in IS08601 format. - LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` - // ProvisioningState - READ-ONLY; Provisioning state of the scheduled query rule. Possible values include: 'Succeeded', 'Deploying', 'Canceled', 'Failed' - ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` - // Source - Data Source against which rule will Query Data - Source *Source `json:"source,omitempty"` - // Schedule - Schedule (Frequency, Time Window) for rule. 
Required for action type - AlertingAction - Schedule *Schedule `json:"schedule,omitempty"` - // Action - Action needs to be taken on rule execution. - Action BasicAction `json:"action,omitempty"` -} - -// MarshalJSON is the custom marshaler for LogSearchRule. -func (lsr LogSearchRule) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lsr.Description != nil { - objectMap["description"] = lsr.Description - } - if lsr.Enabled != "" { - objectMap["enabled"] = lsr.Enabled - } - if lsr.Source != nil { - objectMap["source"] = lsr.Source - } - if lsr.Schedule != nil { - objectMap["schedule"] = lsr.Schedule - } - objectMap["action"] = lsr.Action - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for LogSearchRule struct. -func (lsr *LogSearchRule) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "description": - if v != nil { - var description string - err = json.Unmarshal(*v, &description) - if err != nil { - return err - } - lsr.Description = &description - } - case "enabled": - if v != nil { - var enabled Enabled - err = json.Unmarshal(*v, &enabled) - if err != nil { - return err - } - lsr.Enabled = enabled - } - case "lastUpdatedTime": - if v != nil { - var lastUpdatedTime date.Time - err = json.Unmarshal(*v, &lastUpdatedTime) - if err != nil { - return err - } - lsr.LastUpdatedTime = &lastUpdatedTime - } - case "provisioningState": - if v != nil { - var provisioningState ProvisioningState - err = json.Unmarshal(*v, &provisioningState) - if err != nil { - return err - } - lsr.ProvisioningState = provisioningState - } - case "source": - if v != nil { - var source Source - err = json.Unmarshal(*v, &source) - if err != nil { - return err - } - lsr.Source = &source - } - case "schedule": - if v != nil { - var schedule Schedule - err = json.Unmarshal(*v, &schedule) - if err != nil { - return err - } - lsr.Schedule = &schedule - } - case "action": - if v != nil { - action, err := unmarshalBasicAction(*v) - if err != nil { - return err - } - lsr.Action = action - } - } - } - - return nil -} - -// LogSearchRulePatch log Search Rule Definition for Patching -type LogSearchRulePatch struct { - // Enabled - The flag which indicates whether the Log Search rule is enabled. Value should be true or false. Possible values include: 'True', 'False' - Enabled Enabled `json:"enabled,omitempty"` -} - -// LogSearchRuleResource the Log Search Rule resource. -type LogSearchRuleResource struct { - autorest.Response `json:"-"` - // LogSearchRule - The rule properties of the resource. - *LogSearchRule `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for LogSearchRuleResource. 
-func (lsrr LogSearchRuleResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lsrr.LogSearchRule != nil { - objectMap["properties"] = lsrr.LogSearchRule - } - if lsrr.Location != nil { - objectMap["location"] = lsrr.Location - } - if lsrr.Tags != nil { - objectMap["tags"] = lsrr.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for LogSearchRuleResource struct. -func (lsrr *LogSearchRuleResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var logSearchRule LogSearchRule - err = json.Unmarshal(*v, &logSearchRule) - if err != nil { - return err - } - lsrr.LogSearchRule = &logSearchRule - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - lsrr.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - lsrr.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - lsrr.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - lsrr.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - lsrr.Tags = tags - } - } - } - - return nil -} - -// LogSearchRuleResourceCollection represents a collection of Log Search rule resources. -type LogSearchRuleResourceCollection struct { - autorest.Response `json:"-"` - // Value - The values for the Log Search Rule resources. - Value *[]LogSearchRuleResource `json:"value,omitempty"` -} - -// LogSearchRuleResourcePatch the log search rule resource for patch operations. -type LogSearchRuleResourcePatch struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // LogSearchRulePatch - The log search rule properties of the resource. - *LogSearchRulePatch `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for LogSearchRuleResourcePatch. -func (lsrrp LogSearchRuleResourcePatch) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if lsrrp.Tags != nil { - objectMap["tags"] = lsrrp.Tags - } - if lsrrp.LogSearchRulePatch != nil { - objectMap["properties"] = lsrrp.LogSearchRulePatch - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for LogSearchRuleResourcePatch struct. -func (lsrrp *LogSearchRuleResourcePatch) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - lsrrp.Tags = tags - } - case "properties": - if v != nil { - var logSearchRulePatch LogSearchRulePatch - err = json.Unmarshal(*v, &logSearchRulePatch) - if err != nil { - return err - } - lsrrp.LogSearchRulePatch = &logSearchRulePatch - } - } - } - - return nil -} - -// LogSettings part of MultiTenantDiagnosticSettings. Specifies the settings for a particular log. -type LogSettings struct { - // Category - Name of a Diagnostic Log category for a resource type this setting is applied to. 
To obtain the list of Diagnostic Log categories for a resource, first perform a GET diagnostic settings operation. - Category *string `json:"category,omitempty"` - // Enabled - a value indicating whether this log is enabled. - Enabled *bool `json:"enabled,omitempty"` - // RetentionPolicy - the retention policy for this log. - RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` -} - -// LogToMetricAction specify action need to be taken when rule type is converting log to metric -type LogToMetricAction struct { - // Criteria - Criteria of Metric - Criteria *[]Criteria `json:"criteria,omitempty"` - // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction' - OdataType OdataTypeBasicAction `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for LogToMetricAction. -func (ltma LogToMetricAction) MarshalJSON() ([]byte, error) { - ltma.OdataType = OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction - objectMap := make(map[string]interface{}) - if ltma.Criteria != nil { - objectMap["criteria"] = ltma.Criteria - } - if ltma.OdataType != "" { - objectMap["odata.type"] = ltma.OdataType - } - return json.Marshal(objectMap) -} - -// AsAlertingAction is the BasicAction implementation for LogToMetricAction. -func (ltma LogToMetricAction) AsAlertingAction() (*AlertingAction, bool) { - return nil, false -} - -// AsLogToMetricAction is the BasicAction implementation for LogToMetricAction. -func (ltma LogToMetricAction) AsLogToMetricAction() (*LogToMetricAction, bool) { - return &ltma, true -} - -// AsAction is the BasicAction implementation for LogToMetricAction. -func (ltma LogToMetricAction) AsAction() (*Action, bool) { - return nil, false -} - -// AsBasicAction is the BasicAction implementation for LogToMetricAction. -func (ltma LogToMetricAction) AsBasicAction() (BasicAction, bool) { - return &ltma, true -} - -// ManagementEventAggregationCondition how the data that is collected should be combined over time. -type ManagementEventAggregationCondition struct { - // Operator - the condition operator. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual' - Operator ConditionOperator `json:"operator,omitempty"` - // Threshold - The threshold value that activates the alert. - Threshold *float64 `json:"threshold,omitempty"` - // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day. - WindowSize *string `json:"windowSize,omitempty"` -} - -// ManagementEventRuleCondition a management event rule condition. -type ManagementEventRuleCondition struct { - // Aggregation - How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate. - Aggregation *ManagementEventAggregationCondition `json:"aggregation,omitempty"` - // DataSource - the resource from which the rule collects its data.
For this type dataSource will always be of type RuleMetricDataSource. - DataSource BasicRuleDataSource `json:"dataSource,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition' - OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) MarshalJSON() ([]byte, error) { - merc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition - objectMap := make(map[string]interface{}) - if merc.Aggregation != nil { - objectMap["aggregation"] = merc.Aggregation - } - objectMap["dataSource"] = merc.DataSource - if merc.OdataType != "" { - objectMap["odata.type"] = merc.OdataType - } - return json.Marshal(objectMap) -} - -// AsThresholdRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) { - return nil, false -} - -// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) { - return nil, false -} - -// AsManagementEventRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) { - return &merc, true -} - -// AsRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) AsRuleCondition() (*RuleCondition, bool) { - return nil, false -} - -// AsBasicRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. -func (merc ManagementEventRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { - return &merc, true -} - -// UnmarshalJSON is the custom unmarshaler for ManagementEventRuleCondition struct. -func (merc *ManagementEventRuleCondition) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "aggregation": - if v != nil { - var aggregation ManagementEventAggregationCondition - err = json.Unmarshal(*v, &aggregation) - if err != nil { - return err - } - merc.Aggregation = &aggregation - } - case "dataSource": - if v != nil { - dataSource, err := unmarshalBasicRuleDataSource(*v) - if err != nil { - return err - } - merc.DataSource = dataSource - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicRuleCondition - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - merc.OdataType = odataType - } - } - } - - return nil -} - -// MetadataValue represents a metric metadata value. -type MetadataValue struct { - // Name - the name of the metadata. - Name *LocalizableString `json:"name,omitempty"` - // Value - the value of the metadata. - Value *string `json:"value,omitempty"` -} - -// Metric the result data of a query. -type Metric struct { - // ID - the metric Id. - ID *string `json:"id,omitempty"` - // Type - the resource type of the metric resource. 
- Type *string `json:"type,omitempty"` - // Name - the name and the display name of the metric, i.e. it is localizable string. - Name *LocalizableString `json:"name,omitempty"` - // Unit - the unit of the metric. Possible values include: 'UnitCount', 'UnitBytes', 'UnitSeconds', 'UnitCountPerSecond', 'UnitBytesPerSecond', 'UnitPercent', 'UnitMilliSeconds', 'UnitByteSeconds', 'UnitUnspecified', 'UnitCores', 'UnitMilliCores', 'UnitNanoCores', 'UnitBitsPerSecond' - Unit Unit `json:"unit,omitempty"` - // Timeseries - the time series returned when a data query is performed. - Timeseries *[]TimeSeriesElement `json:"timeseries,omitempty"` -} - -// MetricAlertAction an alert action. -type MetricAlertAction struct { - // ActionGroupID - the id of the action group to use. - ActionGroupID *string `json:"actionGroupId,omitempty"` - // WebHookProperties - The properties of a webhook object. - WebHookProperties map[string]*string `json:"webHookProperties"` -} - -// MarshalJSON is the custom marshaler for MetricAlertAction. -func (maa MetricAlertAction) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if maa.ActionGroupID != nil { - objectMap["actionGroupId"] = maa.ActionGroupID - } - if maa.WebHookProperties != nil { - objectMap["webHookProperties"] = maa.WebHookProperties - } - return json.Marshal(objectMap) -} - -// BasicMetricAlertCriteria the rule criteria that defines the conditions of the alert rule. -type BasicMetricAlertCriteria interface { - AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) - AsWebtestLocationAvailabilityCriteria() (*WebtestLocationAvailabilityCriteria, bool) - AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) - AsMetricAlertCriteria() (*MetricAlertCriteria, bool) -} - -// MetricAlertCriteria the rule criteria that defines the conditions of the alert rule. 
-type MetricAlertCriteria struct { - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' - OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` -} - -func unmarshalBasicMetricAlertCriteria(body []byte) (BasicMetricAlertCriteria, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["odata.type"] { - case string(OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria): - var masrmmc MetricAlertSingleResourceMultipleMetricCriteria - err := json.Unmarshal(body, &masrmmc) - return masrmmc, err - case string(OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria): - var wlac WebtestLocationAvailabilityCriteria - err := json.Unmarshal(body, &wlac) - return wlac, err - case string(OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria): - var mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria - err := json.Unmarshal(body, &mamrmmc) - return mamrmmc, err - default: - var mac MetricAlertCriteria - err := json.Unmarshal(body, &mac) - return mac, err - } -} -func unmarshalBasicMetricAlertCriteriaArray(body []byte) ([]BasicMetricAlertCriteria, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - macArray := make([]BasicMetricAlertCriteria, len(rawMessages)) - - for index, rawMessage := range rawMessages { - mac, err := unmarshalBasicMetricAlertCriteria(*rawMessage) - if err != nil { - return nil, err - } - macArray[index] = mac - } - return macArray, nil -} - -// MarshalJSON is the custom marshaler for MetricAlertCriteria. -func (mac MetricAlertCriteria) MarshalJSON() ([]byte, error) { - mac.OdataType = OdataTypeMetricAlertCriteria - objectMap := make(map[string]interface{}) - if mac.OdataType != "" { - objectMap["odata.type"] = mac.OdataType - } - for k, v := range mac.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. -func (mac MetricAlertCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsWebtestLocationAvailabilityCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. -func (mac MetricAlertCriteria) AsWebtestLocationAvailabilityCriteria() (*WebtestLocationAvailabilityCriteria, bool) { - return nil, false -} - -// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. -func (mac MetricAlertCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. -func (mac MetricAlertCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { - return &mac, true -} - -// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. 
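[Editor's note] unmarshalBasicMetricAlertCriteria above is the generated discriminator dispatch: it peeks at "odata.type" in a loosely decoded map, then re-unmarshals the whole body into the matching concrete type, falling back to the base type. A standalone sketch of the same dispatch with made-up shape types, not the SDK's:

package main

import (
	"encoding/json"
	"fmt"
)

type shape interface{ kind() string }

type circle struct {
	Type   string  `json:"odata.type"`
	Radius float64 `json:"radius"`
}

func (circle) kind() string { return "circle" }

// generic is the fallback, like the base MetricAlertCriteria.
type generic struct {
	Type string `json:"odata.type"`
}

func (generic) kind() string { return "generic" }

// unmarshalShape dispatches on the discriminator, then decodes the full body.
func unmarshalShape(body []byte) (shape, error) {
	var m map[string]interface{}
	if err := json.Unmarshal(body, &m); err != nil {
		return nil, err
	}
	switch m["odata.type"] {
	case "circle":
		var c circle
		err := json.Unmarshal(body, &c)
		return c, err
	default:
		var g generic
		err := json.Unmarshal(body, &g)
		return g, err
	}
}

func main() {
	s, _ := unmarshalShape([]byte(`{"odata.type":"circle","radius":2}`))
	fmt.Println(s.kind()) // circle
}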
-func (mac MetricAlertCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { - return &mac, true -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertCriteria struct. -func (mac *MetricAlertCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if mac.AdditionalProperties == nil { - mac.AdditionalProperties = make(map[string]interface{}) - } - mac.AdditionalProperties[k] = additionalProperties - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicMetricAlertCriteria - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - mac.OdataType = odataType - } - } - } - - return nil -} - -// MetricAlertMultipleResourceMultipleMetricCriteria specifies the metric alert criteria for multiple -// resource that has multiple metric criteria. -type MetricAlertMultipleResourceMultipleMetricCriteria struct { - // AllOf - the list of multiple metric criteria for this 'all of' operation. - AllOf *[]BasicMultiMetricCriteria `json:"allOf,omitempty"` - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' - OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricAlertMultipleResourceMultipleMetricCriteria. -func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) MarshalJSON() ([]byte, error) { - mamrmmc.OdataType = OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria - objectMap := make(map[string]interface{}) - if mamrmmc.AllOf != nil { - objectMap["allOf"] = mamrmmc.AllOf - } - if mamrmmc.OdataType != "" { - objectMap["odata.type"] = mamrmmc.OdataType - } - for k, v := range mamrmmc.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. -func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsWebtestLocationAvailabilityCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. -func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsWebtestLocationAvailabilityCriteria() (*WebtestLocationAvailabilityCriteria, bool) { - return nil, false -} - -// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. 
-func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { - return &mamrmmc, true -} - -// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. -func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { - return nil, false -} - -// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. -func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { - return &mamrmmc, true -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertMultipleResourceMultipleMetricCriteria struct. -func (mamrmmc *MetricAlertMultipleResourceMultipleMetricCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "allOf": - if v != nil { - allOf, err := unmarshalBasicMultiMetricCriteriaArray(*v) - if err != nil { - return err - } - mamrmmc.AllOf = &allOf - } - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if mamrmmc.AdditionalProperties == nil { - mamrmmc.AdditionalProperties = make(map[string]interface{}) - } - mamrmmc.AdditionalProperties[k] = additionalProperties - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicMetricAlertCriteria - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - mamrmmc.OdataType = odataType - } - } - } - - return nil -} - -// MetricAlertProperties an alert rule. -type MetricAlertProperties struct { - // Description - the description of the metric alert that will be included in the alert email. - Description *string `json:"description,omitempty"` - // Severity - Alert severity {0, 1, 2, 3, 4} - Severity *int32 `json:"severity,omitempty"` - // Enabled - the flag that indicates whether the metric alert is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Scopes - the list of resource id's that this metric alert is scoped to. - Scopes *[]string `json:"scopes,omitempty"` - // EvaluationFrequency - how often the metric alert is evaluated represented in ISO 8601 duration format. - EvaluationFrequency *string `json:"evaluationFrequency,omitempty"` - // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. - WindowSize *string `json:"windowSize,omitempty"` - // TargetResourceType - the resource type of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria. - TargetResourceType *string `json:"targetResourceType,omitempty"` - // TargetResourceRegion - the region of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria. - TargetResourceRegion *string `json:"targetResourceRegion,omitempty"` - // Criteria - defines the specific alert criteria information. - Criteria BasicMetricAlertCriteria `json:"criteria,omitempty"` - // AutoMitigate - the flag that indicates whether the alert should be auto resolved or not. The default is true. 
- AutoMitigate *bool `json:"autoMitigate,omitempty"` - // Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved. - Actions *[]MetricAlertAction `json:"actions,omitempty"` - // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format. - LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricAlertProperties. -func (mapVar MetricAlertProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mapVar.Description != nil { - objectMap["description"] = mapVar.Description - } - if mapVar.Severity != nil { - objectMap["severity"] = mapVar.Severity - } - if mapVar.Enabled != nil { - objectMap["enabled"] = mapVar.Enabled - } - if mapVar.Scopes != nil { - objectMap["scopes"] = mapVar.Scopes - } - if mapVar.EvaluationFrequency != nil { - objectMap["evaluationFrequency"] = mapVar.EvaluationFrequency - } - if mapVar.WindowSize != nil { - objectMap["windowSize"] = mapVar.WindowSize - } - if mapVar.TargetResourceType != nil { - objectMap["targetResourceType"] = mapVar.TargetResourceType - } - if mapVar.TargetResourceRegion != nil { - objectMap["targetResourceRegion"] = mapVar.TargetResourceRegion - } - objectMap["criteria"] = mapVar.Criteria - if mapVar.AutoMitigate != nil { - objectMap["autoMitigate"] = mapVar.AutoMitigate - } - if mapVar.Actions != nil { - objectMap["actions"] = mapVar.Actions - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertProperties struct. -func (mapVar *MetricAlertProperties) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "description": - if v != nil { - var description string - err = json.Unmarshal(*v, &description) - if err != nil { - return err - } - mapVar.Description = &description - } - case "severity": - if v != nil { - var severity int32 - err = json.Unmarshal(*v, &severity) - if err != nil { - return err - } - mapVar.Severity = &severity - } - case "enabled": - if v != nil { - var enabled bool - err = json.Unmarshal(*v, &enabled) - if err != nil { - return err - } - mapVar.Enabled = &enabled - } - case "scopes": - if v != nil { - var scopes []string - err = json.Unmarshal(*v, &scopes) - if err != nil { - return err - } - mapVar.Scopes = &scopes - } - case "evaluationFrequency": - if v != nil { - var evaluationFrequency string - err = json.Unmarshal(*v, &evaluationFrequency) - if err != nil { - return err - } - mapVar.EvaluationFrequency = &evaluationFrequency - } - case "windowSize": - if v != nil { - var windowSize string - err = json.Unmarshal(*v, &windowSize) - if err != nil { - return err - } - mapVar.WindowSize = &windowSize - } - case "targetResourceType": - if v != nil { - var targetResourceType string - err = json.Unmarshal(*v, &targetResourceType) - if err != nil { - return err - } - mapVar.TargetResourceType = &targetResourceType - } - case "targetResourceRegion": - if v != nil { - var targetResourceRegion string - err = json.Unmarshal(*v, &targetResourceRegion) - if err != nil { - return err - } - mapVar.TargetResourceRegion = &targetResourceRegion - } - case "criteria": - if v != nil { - criteria, err := unmarshalBasicMetricAlertCriteria(*v) - if err != nil { - return err - } - mapVar.Criteria = criteria - } - case "autoMitigate": - if v != nil { - var autoMitigate bool - err = 
json.Unmarshal(*v, &autoMitigate) - if err != nil { - return err - } - mapVar.AutoMitigate = &autoMitigate - } - case "actions": - if v != nil { - var actions []MetricAlertAction - err = json.Unmarshal(*v, &actions) - if err != nil { - return err - } - mapVar.Actions = &actions - } - case "lastUpdatedTime": - if v != nil { - var lastUpdatedTime date.Time - err = json.Unmarshal(*v, &lastUpdatedTime) - if err != nil { - return err - } - mapVar.LastUpdatedTime = &lastUpdatedTime - } - } - } - - return nil -} - -// MetricAlertResource the metric alert resource. -type MetricAlertResource struct { - autorest.Response `json:"-"` - // MetricAlertProperties - The alert rule properties of the resource. - *MetricAlertProperties `json:"properties,omitempty"` - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for MetricAlertResource. -func (mar MetricAlertResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if mar.MetricAlertProperties != nil { - objectMap["properties"] = mar.MetricAlertProperties - } - if mar.Location != nil { - objectMap["location"] = mar.Location - } - if mar.Tags != nil { - objectMap["tags"] = mar.Tags - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertResource struct. -func (mar *MetricAlertResource) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "properties": - if v != nil { - var metricAlertProperties MetricAlertProperties - err = json.Unmarshal(*v, &metricAlertProperties) - if err != nil { - return err - } - mar.MetricAlertProperties = &metricAlertProperties - } - case "id": - if v != nil { - var ID string - err = json.Unmarshal(*v, &ID) - if err != nil { - return err - } - mar.ID = &ID - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mar.Name = &name - } - case "type": - if v != nil { - var typeVar string - err = json.Unmarshal(*v, &typeVar) - if err != nil { - return err - } - mar.Type = &typeVar - } - case "location": - if v != nil { - var location string - err = json.Unmarshal(*v, &location) - if err != nil { - return err - } - mar.Location = &location - } - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - mar.Tags = tags - } - } - } - - return nil -} - -// MetricAlertResourceCollection represents a collection of alert rule resources. -type MetricAlertResourceCollection struct { - autorest.Response `json:"-"` - // Value - the values for the alert rule resources. - Value *[]MetricAlertResource `json:"value,omitempty"` -} - -// MetricAlertResourcePatch the metric alert resource for patch operations. -type MetricAlertResourcePatch struct { - // Tags - Resource tags - Tags map[string]*string `json:"tags"` - // MetricAlertProperties - The alert rule properties of the resource. - *MetricAlertProperties `json:"properties,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricAlertResourcePatch. 
-func (marp MetricAlertResourcePatch) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if marp.Tags != nil { - objectMap["tags"] = marp.Tags - } - if marp.MetricAlertProperties != nil { - objectMap["properties"] = marp.MetricAlertProperties - } - return json.Marshal(objectMap) -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertResourcePatch struct. -func (marp *MetricAlertResourcePatch) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "tags": - if v != nil { - var tags map[string]*string - err = json.Unmarshal(*v, &tags) - if err != nil { - return err - } - marp.Tags = tags - } - case "properties": - if v != nil { - var metricAlertProperties MetricAlertProperties - err = json.Unmarshal(*v, &metricAlertProperties) - if err != nil { - return err - } - marp.MetricAlertProperties = &metricAlertProperties - } - } - } - - return nil -} - -// MetricAlertSingleResourceMultipleMetricCriteria specifies the metric alert criteria for a single -// resource that has multiple metric criteria. -type MetricAlertSingleResourceMultipleMetricCriteria struct { - // AllOf - The list of metric criteria for this 'all of' operation. - AllOf *[]MetricCriteria `json:"allOf,omitempty"` - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' - OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricAlertSingleResourceMultipleMetricCriteria. -func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) MarshalJSON() ([]byte, error) { - masrmmc.OdataType = OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria - objectMap := make(map[string]interface{}) - if masrmmc.AllOf != nil { - objectMap["allOf"] = masrmmc.AllOf - } - if masrmmc.OdataType != "" { - objectMap["odata.type"] = masrmmc.OdataType - } - for k, v := range masrmmc.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. -func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { - return &masrmmc, true -} - -// AsWebtestLocationAvailabilityCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. -func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsWebtestLocationAvailabilityCriteria() (*WebtestLocationAvailabilityCriteria, bool) { - return nil, false -} - -// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. 
-func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. -func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { - return nil, false -} - -// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. -func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { - return &masrmmc, true -} - -// UnmarshalJSON is the custom unmarshaler for MetricAlertSingleResourceMultipleMetricCriteria struct. -func (masrmmc *MetricAlertSingleResourceMultipleMetricCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "allOf": - if v != nil { - var allOf []MetricCriteria - err = json.Unmarshal(*v, &allOf) - if err != nil { - return err - } - masrmmc.AllOf = &allOf - } - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if masrmmc.AdditionalProperties == nil { - masrmmc.AdditionalProperties = make(map[string]interface{}) - } - masrmmc.AdditionalProperties[k] = additionalProperties - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicMetricAlertCriteria - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - masrmmc.OdataType = odataType - } - } - } - - return nil -} - -// MetricAlertStatus an alert status. -type MetricAlertStatus struct { - // Name - The status name. - Name *string `json:"name,omitempty"` - // ID - The alert rule arm id. - ID *string `json:"id,omitempty"` - // Type - The extended resource type name. - Type *string `json:"type,omitempty"` - // Properties - The alert status properties of the metric alert status. - Properties *MetricAlertStatusProperties `json:"properties,omitempty"` -} - -// MetricAlertStatusCollection represents a collection of alert rule resources. -type MetricAlertStatusCollection struct { - autorest.Response `json:"-"` - // Value - the values for the alert rule resources. - Value *[]MetricAlertStatus `json:"value,omitempty"` -} - -// MetricAlertStatusProperties an alert status properties. -type MetricAlertStatusProperties struct { - // Dimensions - An object describing the type of the dimensions. - Dimensions map[string]*string `json:"dimensions"` - // Status - status value - Status *string `json:"status,omitempty"` - // Timestamp - UTC time when the status was checked. - Timestamp *date.Time `json:"timestamp,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricAlertStatusProperties. -func (masp MetricAlertStatusProperties) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if masp.Dimensions != nil { - objectMap["dimensions"] = masp.Dimensions - } - if masp.Status != nil { - objectMap["status"] = masp.Status - } - if masp.Timestamp != nil { - objectMap["timestamp"] = masp.Timestamp - } - return json.Marshal(objectMap) -} - -// MetricAvailability metric availability specifies the time grain (aggregation interval or frequency) and -// the retention period for that time grain. 
-type MetricAvailability struct { - // TimeGrain - the time grain specifies the aggregation interval for the metric. Expressed as a duration 'PT1M', 'P1D', etc. - TimeGrain *string `json:"timeGrain,omitempty"` - // Retention - the retention period for the metric at the specified timegrain. Expressed as a duration 'PT1M', 'P1D', etc. - Retention *string `json:"retention,omitempty"` -} - -// MetricCriteria criterion to filter metrics. -type MetricCriteria struct { - // Operator - the criteria operator. Possible values include: 'OperatorEquals', 'OperatorNotEquals', 'OperatorGreaterThan', 'OperatorGreaterThanOrEqual', 'OperatorLessThan', 'OperatorLessThanOrEqual' - Operator Operator `json:"operator,omitempty"` - // Threshold - the criteria threshold value that activates the alert. - Threshold *float64 `json:"threshold,omitempty"` - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // Name - Name of the criteria. - Name *string `json:"name,omitempty"` - // MetricName - Name of the metric. - MetricName *string `json:"metricName,omitempty"` - // MetricNamespace - Namespace of the metric. - MetricNamespace *string `json:"metricNamespace,omitempty"` - // TimeAggregation - the criteria time aggregation types. - TimeAggregation interface{} `json:"timeAggregation,omitempty"` - // Dimensions - List of dimension conditions. - Dimensions *[]MetricDimension `json:"dimensions,omitempty"` - // SkipMetricValidation - Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped. - SkipMetricValidation *bool `json:"skipMetricValidation,omitempty"` - // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' - CriterionType CriterionType `json:"criterionType,omitempty"` -} - -// MarshalJSON is the custom marshaler for MetricCriteria. -func (mc MetricCriteria) MarshalJSON() ([]byte, error) { - mc.CriterionType = CriterionTypeStaticThresholdCriterion - objectMap := make(map[string]interface{}) - if mc.Operator != "" { - objectMap["operator"] = mc.Operator - } - if mc.Threshold != nil { - objectMap["threshold"] = mc.Threshold - } - if mc.Name != nil { - objectMap["name"] = mc.Name - } - if mc.MetricName != nil { - objectMap["metricName"] = mc.MetricName - } - if mc.MetricNamespace != nil { - objectMap["metricNamespace"] = mc.MetricNamespace - } - if mc.TimeAggregation != nil { - objectMap["timeAggregation"] = mc.TimeAggregation - } - if mc.Dimensions != nil { - objectMap["dimensions"] = mc.Dimensions - } - if mc.SkipMetricValidation != nil { - objectMap["skipMetricValidation"] = mc.SkipMetricValidation - } - if mc.CriterionType != "" { - objectMap["criterionType"] = mc.CriterionType - } - for k, v := range mc.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. -func (mc MetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { - return &mc, true -} - -// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. -func (mc MetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { - return nil, false -} - -// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. 
-func (mc MetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { - return nil, false -} - -// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. -func (mc MetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { - return &mc, true -} - -// UnmarshalJSON is the custom unmarshaler for MetricCriteria struct. -func (mc *MetricCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "operator": - if v != nil { - var operator Operator - err = json.Unmarshal(*v, &operator) - if err != nil { - return err - } - mc.Operator = operator - } - case "threshold": - if v != nil { - var threshold float64 - err = json.Unmarshal(*v, &threshold) - if err != nil { - return err - } - mc.Threshold = &threshold - } - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if mc.AdditionalProperties == nil { - mc.AdditionalProperties = make(map[string]interface{}) - } - mc.AdditionalProperties[k] = additionalProperties - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mc.Name = &name - } - case "metricName": - if v != nil { - var metricName string - err = json.Unmarshal(*v, &metricName) - if err != nil { - return err - } - mc.MetricName = &metricName - } - case "metricNamespace": - if v != nil { - var metricNamespace string - err = json.Unmarshal(*v, &metricNamespace) - if err != nil { - return err - } - mc.MetricNamespace = &metricNamespace - } - case "timeAggregation": - if v != nil { - var timeAggregation interface{} - err = json.Unmarshal(*v, &timeAggregation) - if err != nil { - return err - } - mc.TimeAggregation = timeAggregation - } - case "dimensions": - if v != nil { - var dimensions []MetricDimension - err = json.Unmarshal(*v, &dimensions) - if err != nil { - return err - } - mc.Dimensions = &dimensions - } - case "skipMetricValidation": - if v != nil { - var skipMetricValidation bool - err = json.Unmarshal(*v, &skipMetricValidation) - if err != nil { - return err - } - mc.SkipMetricValidation = &skipMetricValidation - } - case "criterionType": - if v != nil { - var criterionType CriterionType - err = json.Unmarshal(*v, &criterionType) - if err != nil { - return err - } - mc.CriterionType = criterionType - } - } - } - - return nil -} - -// MetricDefinition metric definition class specifies the metadata for a metric. -type MetricDefinition struct { - // IsDimensionRequired - Flag to indicate whether the dimension is required. - IsDimensionRequired *bool `json:"isDimensionRequired,omitempty"` - // ResourceID - the resource identifier of the resource that emitted the metric. - ResourceID *string `json:"resourceId,omitempty"` - // Namespace - the namespace the metric belongs to. - Namespace *string `json:"namespace,omitempty"` - // Name - the name and the display name of the metric, i.e. it is a localizable string. - Name *LocalizableString `json:"name,omitempty"` - // Unit - the unit of the metric. 
Possible values include: 'UnitCount', 'UnitBytes', 'UnitSeconds', 'UnitCountPerSecond', 'UnitBytesPerSecond', 'UnitPercent', 'UnitMilliSeconds', 'UnitByteSeconds', 'UnitUnspecified', 'UnitCores', 'UnitMilliCores', 'UnitNanoCores', 'UnitBitsPerSecond' - Unit Unit `json:"unit,omitempty"` - // PrimaryAggregationType - the primary aggregation type value defining how to use the values for display. Possible values include: 'None', 'Average', 'Count', 'Minimum', 'Maximum', 'Total' - PrimaryAggregationType AggregationType `json:"primaryAggregationType,omitempty"` - // SupportedAggregationTypes - the collection of what aggregation types are supported. - SupportedAggregationTypes *[]AggregationType `json:"supportedAggregationTypes,omitempty"` - // MetricAvailabilities - the collection of what aggregation intervals are available to be queried. - MetricAvailabilities *[]MetricAvailability `json:"metricAvailabilities,omitempty"` - // ID - the resource identifier of the metric definition. - ID *string `json:"id,omitempty"` - // Dimensions - the name and the display name of the dimension, i.e. it is a localizable string. - Dimensions *[]LocalizableString `json:"dimensions,omitempty"` -} - -// MetricDefinitionCollection represents collection of metric definitions. -type MetricDefinitionCollection struct { - autorest.Response `json:"-"` - // Value - the values for the metric definitions. - Value *[]MetricDefinition `json:"value,omitempty"` -} - -// MetricDimension specifies a metric dimension. -type MetricDimension struct { - // Name - Name of the dimension. - Name *string `json:"name,omitempty"` - // Operator - the dimension operator. Only 'Include' and 'Exclude' are supported - Operator *string `json:"operator,omitempty"` - // Values - list of dimension values. - Values *[]string `json:"values,omitempty"` -} - -// MetricSettings part of MultiTenantDiagnosticSettings. Specifies the settings for a particular metric. -type MetricSettings struct { - // TimeGrain - the timegrain of the metric in ISO8601 format. - TimeGrain *string `json:"timeGrain,omitempty"` - // Category - Name of a Diagnostic Metric category for a resource type this setting is applied to. To obtain the list of Diagnostic metric categories for a resource, first perform a GET diagnostic settings operation. - Category *string `json:"category,omitempty"` - // Enabled - a value indicating whether this category is enabled. - Enabled *bool `json:"enabled,omitempty"` - // RetentionPolicy - the retention policy for this category. - RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` -} - -// MetricTrigger the trigger that results in a scaling action. -type MetricTrigger struct { - // MetricName - the name of the metric that defines what the rule monitors. - MetricName *string `json:"metricName,omitempty"` - // MetricNamespace - the namespace of the metric that defines what the rule monitors. - MetricNamespace *string `json:"metricNamespace,omitempty"` - // MetricResourceURI - the resource identifier of the resource the rule monitors. - MetricResourceURI *string `json:"metricResourceUri,omitempty"` - // TimeGrain - the granularity of metrics the rule monitors. Must be one of the predefined values returned from metric definitions for the metric. Must be between 12 hours and 1 minute. - TimeGrain *string `json:"timeGrain,omitempty"` - // Statistic - the metric statistic type. How the metrics from multiple instances are combined. 
Possible values include: 'MetricStatisticTypeAverage', 'MetricStatisticTypeMin', 'MetricStatisticTypeMax', 'MetricStatisticTypeSum' - Statistic MetricStatisticType `json:"statistic,omitempty"` - // TimeWindow - the range of time in which instance data is collected. This value must be greater than the delay in metric collection, which can vary from resource-to-resource. Must be between 12 hours and 5 minutes. - TimeWindow *string `json:"timeWindow,omitempty"` - // TimeAggregation - time aggregation type. How the data that is collected should be combined over time. The default value is Average. Possible values include: 'TimeAggregationTypeAverage', 'TimeAggregationTypeMinimum', 'TimeAggregationTypeMaximum', 'TimeAggregationTypeTotal', 'TimeAggregationTypeCount', 'TimeAggregationTypeLast' - TimeAggregation TimeAggregationType `json:"timeAggregation,omitempty"` - // Operator - the operator that is used to compare the metric data and the threshold. Possible values include: 'Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual' - Operator ComparisonOperationType `json:"operator,omitempty"` - // Threshold - the threshold of the metric that triggers the scale action. - Threshold *float64 `json:"threshold,omitempty"` - // Dimensions - List of dimension conditions. For example: [{"DimensionName":"AppName","Operator":"Equals","Values":["App1"]},{"DimensionName":"Deployment","Operator":"Equals","Values":["default"]}]. - Dimensions *[]ScaleRuleMetricDimension `json:"dimensions,omitempty"` -} - -// MetricValue represents a metric value. -type MetricValue struct { - // TimeStamp - the timestamp for the metric value in ISO 8601 format. - TimeStamp *date.Time `json:"timeStamp,omitempty"` - // Average - the average value in the time range. - Average *float64 `json:"average,omitempty"` - // Minimum - the least value in the time range. - Minimum *float64 `json:"minimum,omitempty"` - // Maximum - the greatest value in the time range. - Maximum *float64 `json:"maximum,omitempty"` - // Total - the sum of all of the values in the time range. - Total *float64 `json:"total,omitempty"` - // Count - the number of samples in the time range. Can be used to determine the number of values that contributed to the average value. - Count *float64 `json:"count,omitempty"` -} - -// BasicMultiMetricCriteria the types of conditions for a multi resource alert. -type BasicMultiMetricCriteria interface { - AsMetricCriteria() (*MetricCriteria, bool) - AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) - AsMultiMetricCriteria() (*MultiMetricCriteria, bool) -} - -// MultiMetricCriteria the types of conditions for a multi resource alert. -type MultiMetricCriteria struct { - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // Name - Name of the criteria. - Name *string `json:"name,omitempty"` - // MetricName - Name of the metric. - MetricName *string `json:"metricName,omitempty"` - // MetricNamespace - Namespace of the metric. - MetricNamespace *string `json:"metricNamespace,omitempty"` - // TimeAggregation - the criteria time aggregation types. - TimeAggregation interface{} `json:"timeAggregation,omitempty"` - // Dimensions - List of dimension conditions. - Dimensions *[]MetricDimension `json:"dimensions,omitempty"` - // SkipMetricValidation - Allows creating an alert rule on a custom metric that isn't yet emitted, by causing the metric validation to be skipped. 
- SkipMetricValidation *bool `json:"skipMetricValidation,omitempty"` - // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' - CriterionType CriterionType `json:"criterionType,omitempty"` -} - -func unmarshalBasicMultiMetricCriteria(body []byte) (BasicMultiMetricCriteria, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["criterionType"] { - case string(CriterionTypeStaticThresholdCriterion): - var mc MetricCriteria - err := json.Unmarshal(body, &mc) - return mc, err - case string(CriterionTypeDynamicThresholdCriterion): - var dmc DynamicMetricCriteria - err := json.Unmarshal(body, &dmc) - return dmc, err - default: - var mmc MultiMetricCriteria - err := json.Unmarshal(body, &mmc) - return mmc, err - } -} -func unmarshalBasicMultiMetricCriteriaArray(body []byte) ([]BasicMultiMetricCriteria, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - mmcArray := make([]BasicMultiMetricCriteria, len(rawMessages)) - - for index, rawMessage := range rawMessages { - mmc, err := unmarshalBasicMultiMetricCriteria(*rawMessage) - if err != nil { - return nil, err - } - mmcArray[index] = mmc - } - return mmcArray, nil -} - -// MarshalJSON is the custom marshaler for MultiMetricCriteria. -func (mmc MultiMetricCriteria) MarshalJSON() ([]byte, error) { - mmc.CriterionType = CriterionTypeMultiMetricCriteria - objectMap := make(map[string]interface{}) - if mmc.Name != nil { - objectMap["name"] = mmc.Name - } - if mmc.MetricName != nil { - objectMap["metricName"] = mmc.MetricName - } - if mmc.MetricNamespace != nil { - objectMap["metricNamespace"] = mmc.MetricNamespace - } - if mmc.TimeAggregation != nil { - objectMap["timeAggregation"] = mmc.TimeAggregation - } - if mmc.Dimensions != nil { - objectMap["dimensions"] = mmc.Dimensions - } - if mmc.SkipMetricValidation != nil { - objectMap["skipMetricValidation"] = mmc.SkipMetricValidation - } - if mmc.CriterionType != "" { - objectMap["criterionType"] = mmc.CriterionType - } - for k, v := range mmc.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. -func (mmc MultiMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { - return nil, false -} - -// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. -func (mmc MultiMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { - return nil, false -} - -// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. -func (mmc MultiMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { - return &mmc, true -} - -// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. -func (mmc MultiMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { - return &mmc, true -} - -// UnmarshalJSON is the custom unmarshaler for MultiMetricCriteria struct. 
-func (mmc *MultiMetricCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if mmc.AdditionalProperties == nil { - mmc.AdditionalProperties = make(map[string]interface{}) - } - mmc.AdditionalProperties[k] = additionalProperties - } - case "name": - if v != nil { - var name string - err = json.Unmarshal(*v, &name) - if err != nil { - return err - } - mmc.Name = &name - } - case "metricName": - if v != nil { - var metricName string - err = json.Unmarshal(*v, &metricName) - if err != nil { - return err - } - mmc.MetricName = &metricName - } - case "metricNamespace": - if v != nil { - var metricNamespace string - err = json.Unmarshal(*v, &metricNamespace) - if err != nil { - return err - } - mmc.MetricNamespace = &metricNamespace - } - case "timeAggregation": - if v != nil { - var timeAggregation interface{} - err = json.Unmarshal(*v, &timeAggregation) - if err != nil { - return err - } - mmc.TimeAggregation = timeAggregation - } - case "dimensions": - if v != nil { - var dimensions []MetricDimension - err = json.Unmarshal(*v, &dimensions) - if err != nil { - return err - } - mmc.Dimensions = &dimensions - } - case "skipMetricValidation": - if v != nil { - var skipMetricValidation bool - err = json.Unmarshal(*v, &skipMetricValidation) - if err != nil { - return err - } - mmc.SkipMetricValidation = &skipMetricValidation - } - case "criterionType": - if v != nil { - var criterionType CriterionType - err = json.Unmarshal(*v, &criterionType) - if err != nil { - return err - } - mmc.CriterionType = criterionType - } - } - } - - return nil -} - -// Operation microsoft Insights API operation definition. -type Operation struct { - // Name - Operation name: {provider}/{resource}/{operation} - Name *string `json:"name,omitempty"` - // Display - Display metadata associated with the operation. - Display *OperationDisplay `json:"display,omitempty"` -} - -// OperationDisplay display metadata associated with the operation. -type OperationDisplay struct { - // Provider - Service provider: Microsoft.Insights - Provider *string `json:"provider,omitempty"` - // Resource - Resource on which the operation is performed: AlertRules, Autoscale, etc. - Resource *string `json:"resource,omitempty"` - // Operation - Operation type: Read, write, delete, etc. - Operation *string `json:"operation,omitempty"` -} - -// OperationListResult result of the request to list Microsoft.Insights operations. It contains a list of -// operations and a URL link to get the next set of results. -type OperationListResult struct { - autorest.Response `json:"-"` - // Value - List of operations supported by the Microsoft.Insights provider. - Value *[]Operation `json:"value,omitempty"` - // NextLink - URL to get the next set of operation list results if there are any. - NextLink *string `json:"nextLink,omitempty"` -} - -// ProxyOnlyResource a proxy only azure resource object -type ProxyOnlyResource struct { - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ProxyOnlyResource. 
-func (por ProxyOnlyResource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - return json.Marshal(objectMap) -} - -// Recurrence the repeating times at which this profile begins. This element is not used if the FixedDate -// element is used. -type Recurrence struct { - // Frequency - the recurrence frequency. How often the schedule profile should take effect. This value must be Week, meaning each week will have the same set of profiles. For example, to set a daily schedule, set **schedule** to every day of the week. The frequency property specifies that the schedule is repeated weekly. Possible values include: 'RecurrenceFrequencyNone', 'RecurrenceFrequencySecond', 'RecurrenceFrequencyMinute', 'RecurrenceFrequencyHour', 'RecurrenceFrequencyDay', 'RecurrenceFrequencyWeek', 'RecurrenceFrequencyMonth', 'RecurrenceFrequencyYear' - Frequency RecurrenceFrequency `json:"frequency,omitempty"` - // Schedule - the scheduling constraints for when the profile begins. - Schedule *RecurrentSchedule `json:"schedule,omitempty"` -} - -// RecurrentSchedule the scheduling constraints for when the profile begins. -type RecurrentSchedule struct { - // TimeZone - the timezone for the hours of the profile. Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. 
Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time - TimeZone *string `json:"timeZone,omitempty"` - // Days - the collection of days that the profile takes effect on. Possible values are Sunday through Saturday. - Days *[]string `json:"days,omitempty"` - // Hours - A collection of hours that the profile takes effect on. Values supported are 0 to 23 on the 24-hour clock (AM/PM times are not supported). - Hours *[]int32 `json:"hours,omitempty"` - // Minutes - A collection of minutes at which the profile takes effect at. - Minutes *[]int32 `json:"minutes,omitempty"` -} - -// Resource an azure resource object -type Resource struct { - // ID - READ-ONLY; Azure resource Id - ID *string `json:"id,omitempty"` - // Name - READ-ONLY; Azure resource name - Name *string `json:"name,omitempty"` - // Type - READ-ONLY; Azure resource type - Type *string `json:"type,omitempty"` - // Location - Resource location - Location *string `json:"location,omitempty"` - // Tags - Resource tags - Tags map[string]*string `json:"tags"` -} - -// MarshalJSON is the custom marshaler for Resource. -func (r Resource) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if r.Location != nil { - objectMap["location"] = r.Location - } - if r.Tags != nil { - objectMap["tags"] = r.Tags - } - return json.Marshal(objectMap) -} - -// Response the response to a metrics query. -type Response struct { - autorest.Response `json:"-"` - // Cost - The integer value representing the cost of the query, for data case. - Cost *float64 `json:"cost,omitempty"` - // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested. - Timespan *string `json:"timespan,omitempty"` - // Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made. - Interval *string `json:"interval,omitempty"` - // Namespace - The namespace of the metrics been queried - Namespace *string `json:"namespace,omitempty"` - // Resourceregion - The region of the resource been queried for metrics. - Resourceregion *string `json:"resourceregion,omitempty"` - // Value - the value of the collection. - Value *[]Metric `json:"value,omitempty"` -} - -// RetentionPolicy specifies the retention policy for the log. -type RetentionPolicy struct { - // Enabled - a value indicating whether the retention policy is enabled. - Enabled *bool `json:"enabled,omitempty"` - // Days - the number of days for the retention in days. A value of 0 will retain the events indefinitely. - Days *int32 `json:"days,omitempty"` -} - -// BasicRuleAction the action that is performed when the alert rule becomes active, and when an alert condition is -// resolved. -type BasicRuleAction interface { - AsRuleEmailAction() (*RuleEmailAction, bool) - AsRuleWebhookAction() (*RuleWebhookAction, bool) - AsRuleAction() (*RuleAction, bool) -} - -// RuleAction the action that is performed when the alert rule becomes active, and when an alert condition is -// resolved. 
-type RuleAction struct { - // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' - OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` -} - -func unmarshalBasicRuleAction(body []byte) (BasicRuleAction, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["odata.type"] { - case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction): - var rea RuleEmailAction - err := json.Unmarshal(body, &rea) - return rea, err - case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction): - var rwa RuleWebhookAction - err := json.Unmarshal(body, &rwa) - return rwa, err - default: - var ra RuleAction - err := json.Unmarshal(body, &ra) - return ra, err - } -} -func unmarshalBasicRuleActionArray(body []byte) ([]BasicRuleAction, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - raArray := make([]BasicRuleAction, len(rawMessages)) - - for index, rawMessage := range rawMessages { - ra, err := unmarshalBasicRuleAction(*rawMessage) - if err != nil { - return nil, err - } - raArray[index] = ra - } - return raArray, nil -} - -// MarshalJSON is the custom marshaler for RuleAction. -func (ra RuleAction) MarshalJSON() ([]byte, error) { - ra.OdataType = OdataTypeRuleAction - objectMap := make(map[string]interface{}) - if ra.OdataType != "" { - objectMap["odata.type"] = ra.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleEmailAction is the BasicRuleAction implementation for RuleAction. -func (ra RuleAction) AsRuleEmailAction() (*RuleEmailAction, bool) { - return nil, false -} - -// AsRuleWebhookAction is the BasicRuleAction implementation for RuleAction. -func (ra RuleAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { - return nil, false -} - -// AsRuleAction is the BasicRuleAction implementation for RuleAction. -func (ra RuleAction) AsRuleAction() (*RuleAction, bool) { - return &ra, true -} - -// AsBasicRuleAction is the BasicRuleAction implementation for RuleAction. -func (ra RuleAction) AsBasicRuleAction() (BasicRuleAction, bool) { - return &ra, true -} - -// BasicRuleCondition the condition that results in the alert rule being activated. -type BasicRuleCondition interface { - AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) - AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) - AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) - AsRuleCondition() (*RuleCondition, bool) -} - -// RuleCondition the condition that results in the alert rule being activated. -type RuleCondition struct { - // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource. 
-	DataSource BasicRuleDataSource `json:"dataSource,omitempty"`
-	// OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition'
-	OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"`
-}
-
-func unmarshalBasicRuleCondition(body []byte) (BasicRuleCondition, error) {
-	var m map[string]interface{}
-	err := json.Unmarshal(body, &m)
-	if err != nil {
-		return nil, err
-	}
-
-	switch m["odata.type"] {
-	case string(OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition):
-		var trc ThresholdRuleCondition
-		err := json.Unmarshal(body, &trc)
-		return trc, err
-	case string(OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition):
-		var ltrc LocationThresholdRuleCondition
-		err := json.Unmarshal(body, &ltrc)
-		return ltrc, err
-	case string(OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition):
-		var merc ManagementEventRuleCondition
-		err := json.Unmarshal(body, &merc)
-		return merc, err
-	default:
-		var rc RuleCondition
-		err := json.Unmarshal(body, &rc)
-		return rc, err
-	}
-}
-func unmarshalBasicRuleConditionArray(body []byte) ([]BasicRuleCondition, error) {
-	var rawMessages []*json.RawMessage
-	err := json.Unmarshal(body, &rawMessages)
-	if err != nil {
-		return nil, err
-	}
-
-	rcArray := make([]BasicRuleCondition, len(rawMessages))
-
-	for index, rawMessage := range rawMessages {
-		rc, err := unmarshalBasicRuleCondition(*rawMessage)
-		if err != nil {
-			return nil, err
-		}
-		rcArray[index] = rc
-	}
-	return rcArray, nil
-}
-
-// MarshalJSON is the custom marshaler for RuleCondition.
-func (rc RuleCondition) MarshalJSON() ([]byte, error) {
-	rc.OdataType = OdataTypeRuleCondition
-	objectMap := make(map[string]interface{})
-	objectMap["dataSource"] = rc.DataSource
-	if rc.OdataType != "" {
-		objectMap["odata.type"] = rc.OdataType
-	}
-	return json.Marshal(objectMap)
-}
-
-// AsThresholdRuleCondition is the BasicRuleCondition implementation for RuleCondition.
-func (rc RuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) {
-	return nil, false
-}
-
-// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for RuleCondition.
-func (rc RuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) {
-	return nil, false
-}
-
-// AsManagementEventRuleCondition is the BasicRuleCondition implementation for RuleCondition.
-func (rc RuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) {
-	return nil, false
-}
-
-// AsRuleCondition is the BasicRuleCondition implementation for RuleCondition.
-func (rc RuleCondition) AsRuleCondition() (*RuleCondition, bool) {
-	return &rc, true
-}
-
-// AsBasicRuleCondition is the BasicRuleCondition implementation for RuleCondition.
-func (rc RuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) {
-	return &rc, true
-}
-
-// UnmarshalJSON is the custom unmarshaler for RuleCondition struct.
-func (rc *RuleCondition) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "dataSource": - if v != nil { - dataSource, err := unmarshalBasicRuleDataSource(*v) - if err != nil { - return err - } - rc.DataSource = dataSource - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicRuleCondition - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - rc.OdataType = odataType - } - } - } - - return nil -} - -// BasicRuleDataSource the resource from which the rule collects its data. -type BasicRuleDataSource interface { - AsRuleMetricDataSource() (*RuleMetricDataSource, bool) - AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) - AsRuleDataSource() (*RuleDataSource, bool) -} - -// RuleDataSource the resource from which the rule collects its data. -type RuleDataSource struct { - // ResourceURI - the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule. - ResourceURI *string `json:"resourceUri,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' - OdataType OdataType `json:"odata.type,omitempty"` -} - -func unmarshalBasicRuleDataSource(body []byte) (BasicRuleDataSource, error) { - var m map[string]interface{} - err := json.Unmarshal(body, &m) - if err != nil { - return nil, err - } - - switch m["odata.type"] { - case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource): - var rmds RuleMetricDataSource - err := json.Unmarshal(body, &rmds) - return rmds, err - case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource): - var rmeds RuleManagementEventDataSource - err := json.Unmarshal(body, &rmeds) - return rmeds, err - default: - var rds RuleDataSource - err := json.Unmarshal(body, &rds) - return rds, err - } -} -func unmarshalBasicRuleDataSourceArray(body []byte) ([]BasicRuleDataSource, error) { - var rawMessages []*json.RawMessage - err := json.Unmarshal(body, &rawMessages) - if err != nil { - return nil, err - } - - rdsArray := make([]BasicRuleDataSource, len(rawMessages)) - - for index, rawMessage := range rawMessages { - rds, err := unmarshalBasicRuleDataSource(*rawMessage) - if err != nil { - return nil, err - } - rdsArray[index] = rds - } - return rdsArray, nil -} - -// MarshalJSON is the custom marshaler for RuleDataSource. -func (rds RuleDataSource) MarshalJSON() ([]byte, error) { - rds.OdataType = OdataTypeRuleDataSource - objectMap := make(map[string]interface{}) - if rds.ResourceURI != nil { - objectMap["resourceUri"] = rds.ResourceURI - } - if rds.OdataType != "" { - objectMap["odata.type"] = rds.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleDataSource. -func (rds RuleDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { - return nil, false -} - -// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleDataSource. -func (rds RuleDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { - return nil, false -} - -// AsRuleDataSource is the BasicRuleDataSource implementation for RuleDataSource. 
-func (rds RuleDataSource) AsRuleDataSource() (*RuleDataSource, bool) { - return &rds, true -} - -// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleDataSource. -func (rds RuleDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { - return &rds, true -} - -// RuleEmailAction specifies the action to send email when the rule condition is evaluated. The -// discriminator is always RuleEmailAction in this case. -type RuleEmailAction struct { - // SendToServiceOwners - Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated. - SendToServiceOwners *bool `json:"sendToServiceOwners,omitempty"` - // CustomEmails - the list of administrator's custom email addresses to notify of the activation of the alert. - CustomEmails *[]string `json:"customEmails,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' - OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for RuleEmailAction. -func (rea RuleEmailAction) MarshalJSON() ([]byte, error) { - rea.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction - objectMap := make(map[string]interface{}) - if rea.SendToServiceOwners != nil { - objectMap["sendToServiceOwners"] = rea.SendToServiceOwners - } - if rea.CustomEmails != nil { - objectMap["customEmails"] = rea.CustomEmails - } - if rea.OdataType != "" { - objectMap["odata.type"] = rea.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleEmailAction is the BasicRuleAction implementation for RuleEmailAction. -func (rea RuleEmailAction) AsRuleEmailAction() (*RuleEmailAction, bool) { - return &rea, true -} - -// AsRuleWebhookAction is the BasicRuleAction implementation for RuleEmailAction. -func (rea RuleEmailAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { - return nil, false -} - -// AsRuleAction is the BasicRuleAction implementation for RuleEmailAction. -func (rea RuleEmailAction) AsRuleAction() (*RuleAction, bool) { - return nil, false -} - -// AsBasicRuleAction is the BasicRuleAction implementation for RuleEmailAction. -func (rea RuleEmailAction) AsBasicRuleAction() (BasicRuleAction, bool) { - return &rea, true -} - -// RuleManagementEventClaimsDataSource the claims for a rule management event data source. -type RuleManagementEventClaimsDataSource struct { - // EmailAddress - the email address. - EmailAddress *string `json:"emailAddress,omitempty"` -} - -// RuleManagementEventDataSource a rule management event data source. The discriminator fields is always -// RuleManagementEventDataSource in this case. -type RuleManagementEventDataSource struct { - // EventName - the event name. - EventName *string `json:"eventName,omitempty"` - // EventSource - the event source. - EventSource *string `json:"eventSource,omitempty"` - // Level - the level. - Level *string `json:"level,omitempty"` - // OperationName - The name of the operation that should be checked for. If no name is provided, any operation will match. - OperationName *string `json:"operationName,omitempty"` - // ResourceGroupName - the resource group name. - ResourceGroupName *string `json:"resourceGroupName,omitempty"` - // ResourceProviderName - the resource provider name. 
- ResourceProviderName *string `json:"resourceProviderName,omitempty"` - // Status - The status of the operation that should be checked for. If no status is provided, any status will match. - Status *string `json:"status,omitempty"` - // SubStatus - the substatus. - SubStatus *string `json:"subStatus,omitempty"` - // Claims - the claims. - Claims *RuleManagementEventClaimsDataSource `json:"claims,omitempty"` - // ResourceURI - the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule. - ResourceURI *string `json:"resourceUri,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' - OdataType OdataType `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for RuleManagementEventDataSource. -func (rmeds RuleManagementEventDataSource) MarshalJSON() ([]byte, error) { - rmeds.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource - objectMap := make(map[string]interface{}) - if rmeds.EventName != nil { - objectMap["eventName"] = rmeds.EventName - } - if rmeds.EventSource != nil { - objectMap["eventSource"] = rmeds.EventSource - } - if rmeds.Level != nil { - objectMap["level"] = rmeds.Level - } - if rmeds.OperationName != nil { - objectMap["operationName"] = rmeds.OperationName - } - if rmeds.ResourceGroupName != nil { - objectMap["resourceGroupName"] = rmeds.ResourceGroupName - } - if rmeds.ResourceProviderName != nil { - objectMap["resourceProviderName"] = rmeds.ResourceProviderName - } - if rmeds.Status != nil { - objectMap["status"] = rmeds.Status - } - if rmeds.SubStatus != nil { - objectMap["subStatus"] = rmeds.SubStatus - } - if rmeds.Claims != nil { - objectMap["claims"] = rmeds.Claims - } - if rmeds.ResourceURI != nil { - objectMap["resourceUri"] = rmeds.ResourceURI - } - if rmeds.OdataType != "" { - objectMap["odata.type"] = rmeds.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. -func (rmeds RuleManagementEventDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { - return nil, false -} - -// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. -func (rmeds RuleManagementEventDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { - return &rmeds, true -} - -// AsRuleDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. -func (rmeds RuleManagementEventDataSource) AsRuleDataSource() (*RuleDataSource, bool) { - return nil, false -} - -// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. -func (rmeds RuleManagementEventDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { - return &rmeds, true -} - -// RuleMetricDataSource a rule metric data source. The discriminator value is always RuleMetricDataSource -// in this case. -type RuleMetricDataSource struct { - // MetricName - the name of the metric that defines what the rule monitors. - MetricName *string `json:"metricName,omitempty"` - // ResourceURI - the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule. 
- ResourceURI *string `json:"resourceUri,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' - OdataType OdataType `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for RuleMetricDataSource. -func (rmds RuleMetricDataSource) MarshalJSON() ([]byte, error) { - rmds.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource - objectMap := make(map[string]interface{}) - if rmds.MetricName != nil { - objectMap["metricName"] = rmds.MetricName - } - if rmds.ResourceURI != nil { - objectMap["resourceUri"] = rmds.ResourceURI - } - if rmds.OdataType != "" { - objectMap["odata.type"] = rmds.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. -func (rmds RuleMetricDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { - return &rmds, true -} - -// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. -func (rmds RuleMetricDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { - return nil, false -} - -// AsRuleDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. -func (rmds RuleMetricDataSource) AsRuleDataSource() (*RuleDataSource, bool) { - return nil, false -} - -// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. -func (rmds RuleMetricDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { - return &rmds, true -} - -// RuleWebhookAction specifies the action to post to service when the rule condition is evaluated. The -// discriminator is always RuleWebhookAction in this case. -type RuleWebhookAction struct { - // ServiceURI - the service uri to Post the notification when the alert activates or resolves. - ServiceURI *string `json:"serviceUri,omitempty"` - // Properties - the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload. - Properties map[string]*string `json:"properties"` - // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' - OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for RuleWebhookAction. -func (rwa RuleWebhookAction) MarshalJSON() ([]byte, error) { - rwa.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction - objectMap := make(map[string]interface{}) - if rwa.ServiceURI != nil { - objectMap["serviceUri"] = rwa.ServiceURI - } - if rwa.Properties != nil { - objectMap["properties"] = rwa.Properties - } - if rwa.OdataType != "" { - objectMap["odata.type"] = rwa.OdataType - } - return json.Marshal(objectMap) -} - -// AsRuleEmailAction is the BasicRuleAction implementation for RuleWebhookAction. -func (rwa RuleWebhookAction) AsRuleEmailAction() (*RuleEmailAction, bool) { - return nil, false -} - -// AsRuleWebhookAction is the BasicRuleAction implementation for RuleWebhookAction. -func (rwa RuleWebhookAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { - return &rwa, true -} - -// AsRuleAction is the BasicRuleAction implementation for RuleWebhookAction. 
-func (rwa RuleWebhookAction) AsRuleAction() (*RuleAction, bool) { - return nil, false -} - -// AsBasicRuleAction is the BasicRuleAction implementation for RuleWebhookAction. -func (rwa RuleWebhookAction) AsBasicRuleAction() (BasicRuleAction, bool) { - return &rwa, true -} - -// ScaleAction the parameters for the scaling action. -type ScaleAction struct { - // Direction - the scale direction. Whether the scaling action increases or decreases the number of instances. Possible values include: 'ScaleDirectionNone', 'ScaleDirectionIncrease', 'ScaleDirectionDecrease' - Direction ScaleDirection `json:"direction,omitempty"` - // Type - the type of action that should occur when the scale rule fires. Possible values include: 'ChangeCount', 'PercentChangeCount', 'ExactCount' - Type ScaleType `json:"type,omitempty"` - // Value - the number of instances that are involved in the scaling action. This value must be 1 or greater. The default value is 1. - Value *string `json:"value,omitempty"` - // Cooldown - the amount of time to wait since the last scaling action before this action occurs. It must be between 1 week and 1 minute in ISO 8601 format. - Cooldown *string `json:"cooldown,omitempty"` -} - -// ScaleCapacity the number of instances that can be used during this profile. -type ScaleCapacity struct { - // Minimum - the minimum number of instances for the resource. - Minimum *string `json:"minimum,omitempty"` - // Maximum - the maximum number of instances for the resource. The actual maximum number of instances is limited by the cores that are available in the subscription. - Maximum *string `json:"maximum,omitempty"` - // Default - the number of instances that will be set if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. - Default *string `json:"default,omitempty"` -} - -// ScaleRule a rule that provide the triggers and parameters for the scaling action. -type ScaleRule struct { - // MetricTrigger - the trigger that results in a scaling action. - MetricTrigger *MetricTrigger `json:"metricTrigger,omitempty"` - // ScaleAction - the parameters for the scaling action. - ScaleAction *ScaleAction `json:"scaleAction,omitempty"` -} - -// ScaleRuleMetricDimension specifies an auto scale rule metric dimension. -type ScaleRuleMetricDimension struct { - // DimensionName - Name of the dimension. - DimensionName *string `json:"DimensionName,omitempty"` - // Operator - the dimension operator. Only 'Equals' and 'NotEquals' are supported. 'Equals' being equal to any of the values. 'NotEquals' being not equal to all of the values. Possible values include: 'ScaleRuleMetricDimensionOperationTypeEquals', 'ScaleRuleMetricDimensionOperationTypeNotEquals' - Operator ScaleRuleMetricDimensionOperationType `json:"Operator,omitempty"` - // Values - list of dimension values. For example: ["App1","App2"]. - Values *[]string `json:"Values,omitempty"` -} - -// Schedule defines how often to run the search and the time interval. -type Schedule struct { - // FrequencyInMinutes - frequency (in minutes) at which rule condition should be evaluated. - FrequencyInMinutes *int32 `json:"frequencyInMinutes,omitempty"` - // TimeWindowInMinutes - Time window for which data needs to be fetched for query (should be greater than or equal to frequencyInMinutes). - TimeWindowInMinutes *int32 `json:"timeWindowInMinutes,omitempty"` -} - -// SenderAuthorization the authorization used by the user who has performed the operation that led to this -// event. 
This captures the RBAC properties of the event. These usually include the 'action', 'role' and -// the 'scope' -type SenderAuthorization struct { - // Action - the permissible actions. For instance: microsoft.support/supporttickets/write - Action *string `json:"action,omitempty"` - // Role - the role of the user. For instance: Subscription Admin - Role *string `json:"role,omitempty"` - // Scope - the scope. - Scope *string `json:"scope,omitempty"` -} - -// SmsReceiver an SMS receiver. -type SmsReceiver struct { - // Name - The name of the SMS receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // CountryCode - The country code of the SMS receiver. - CountryCode *string `json:"countryCode,omitempty"` - // PhoneNumber - The phone number of the SMS receiver. - PhoneNumber *string `json:"phoneNumber,omitempty"` - // Status - READ-ONLY; The status of the receiver. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled' - Status ReceiverStatus `json:"status,omitempty"` -} - -// MarshalJSON is the custom marshaler for SmsReceiver. -func (sr SmsReceiver) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if sr.Name != nil { - objectMap["name"] = sr.Name - } - if sr.CountryCode != nil { - objectMap["countryCode"] = sr.CountryCode - } - if sr.PhoneNumber != nil { - objectMap["phoneNumber"] = sr.PhoneNumber - } - return json.Marshal(objectMap) -} - -// Source specifies the log search query. -type Source struct { - // Query - Log search query. Required for action type - AlertingAction - Query *string `json:"query,omitempty"` - // AuthorizedResources - List of Resource referred into query - AuthorizedResources *[]string `json:"authorizedResources,omitempty"` - // DataSourceID - The resource uri over which log search query is to be run. - DataSourceID *string `json:"dataSourceId,omitempty"` - // QueryType - Set value to 'ResultCount'. Possible values include: 'ResultCount' - QueryType QueryType `json:"queryType,omitempty"` -} - -// ThresholdRuleCondition a rule condition based on a metric crossing a threshold. -type ThresholdRuleCondition struct { - // Operator - the operator used to compare the data and the threshold. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual' - Operator ConditionOperator `json:"operator,omitempty"` - // Threshold - the threshold value that activates the alert. - Threshold *float64 `json:"threshold,omitempty"` - // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day. - WindowSize *string `json:"windowSize,omitempty"` - // TimeAggregation - the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric. Possible values include: 'TimeAggregationOperatorAverage', 'TimeAggregationOperatorMinimum', 'TimeAggregationOperatorMaximum', 'TimeAggregationOperatorTotal', 'TimeAggregationOperatorLast' - TimeAggregation TimeAggregationOperator `json:"timeAggregation,omitempty"` - // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource. 
- DataSource BasicRuleDataSource `json:"dataSource,omitempty"` - // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition' - OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) MarshalJSON() ([]byte, error) { - trc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition - objectMap := make(map[string]interface{}) - if trc.Operator != "" { - objectMap["operator"] = trc.Operator - } - if trc.Threshold != nil { - objectMap["threshold"] = trc.Threshold - } - if trc.WindowSize != nil { - objectMap["windowSize"] = trc.WindowSize - } - if trc.TimeAggregation != "" { - objectMap["timeAggregation"] = trc.TimeAggregation - } - objectMap["dataSource"] = trc.DataSource - if trc.OdataType != "" { - objectMap["odata.type"] = trc.OdataType - } - return json.Marshal(objectMap) -} - -// AsThresholdRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) { - return &trc, true -} - -// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) { - return nil, false -} - -// AsManagementEventRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) { - return nil, false -} - -// AsRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) AsRuleCondition() (*RuleCondition, bool) { - return nil, false -} - -// AsBasicRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. -func (trc ThresholdRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { - return &trc, true -} - -// UnmarshalJSON is the custom unmarshaler for ThresholdRuleCondition struct. 
-func (trc *ThresholdRuleCondition) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "operator": - if v != nil { - var operator ConditionOperator - err = json.Unmarshal(*v, &operator) - if err != nil { - return err - } - trc.Operator = operator - } - case "threshold": - if v != nil { - var threshold float64 - err = json.Unmarshal(*v, &threshold) - if err != nil { - return err - } - trc.Threshold = &threshold - } - case "windowSize": - if v != nil { - var windowSize string - err = json.Unmarshal(*v, &windowSize) - if err != nil { - return err - } - trc.WindowSize = &windowSize - } - case "timeAggregation": - if v != nil { - var timeAggregation TimeAggregationOperator - err = json.Unmarshal(*v, &timeAggregation) - if err != nil { - return err - } - trc.TimeAggregation = timeAggregation - } - case "dataSource": - if v != nil { - dataSource, err := unmarshalBasicRuleDataSource(*v) - if err != nil { - return err - } - trc.DataSource = dataSource - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicRuleCondition - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - trc.OdataType = odataType - } - } - } - - return nil -} - -// TimeSeriesElement a time series result type. The discriminator value is always TimeSeries in this case. -type TimeSeriesElement struct { - // Metadatavalues - the metadata values returned if $filter was specified in the call. - Metadatavalues *[]MetadataValue `json:"metadatavalues,omitempty"` - // Data - An array of data points representing the metric values. This is only returned if a result type of data is specified. - Data *[]MetricValue `json:"data,omitempty"` -} - -// TimeSeriesInformation the time series info needed for calculating the baseline. -type TimeSeriesInformation struct { - // Sensitivities - the list of sensitivities for calculating the baseline. - Sensitivities *[]string `json:"sensitivities,omitempty"` - // Values - The metric values to calculate the baseline. - Values *[]float64 `json:"values,omitempty"` - // Timestamps - the array of timestamps of the baselines. - Timestamps *[]date.Time `json:"timestamps,omitempty"` -} - -// TimeWindow a specific date-time for the profile. -type TimeWindow struct { - // TimeZone - the timezone of the start and end times for the profile. Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. 
Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time - TimeZone *string `json:"timeZone,omitempty"` - // Start - the start time for the profile in ISO 8601 format. - Start *date.Time `json:"start,omitempty"` - // End - the end time for the profile in ISO 8601 format. - End *date.Time `json:"end,omitempty"` -} - -// TriggerCondition the condition that results in the Log Search rule. -type TriggerCondition struct { - // ThresholdOperator - Evaluation operation for rule - 'GreaterThan' or 'LessThan. Possible values include: 'ConditionalOperatorGreaterThan', 'ConditionalOperatorLessThan', 'ConditionalOperatorEqual' - ThresholdOperator ConditionalOperator `json:"thresholdOperator,omitempty"` - // Threshold - Result or count threshold based on which rule should be triggered. - Threshold *float64 `json:"threshold,omitempty"` - // MetricTrigger - Trigger condition for metric query rule - MetricTrigger *LogMetricTrigger `json:"metricTrigger,omitempty"` -} - -// VoiceReceiver a voice receiver. -type VoiceReceiver struct { - // Name - The name of the voice receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // CountryCode - The country code of the voice receiver. - CountryCode *string `json:"countryCode,omitempty"` - // PhoneNumber - The phone number of the voice receiver. - PhoneNumber *string `json:"phoneNumber,omitempty"` -} - -// WebhookNotification webhook notification of an autoscale event. -type WebhookNotification struct { - // ServiceURI - the service address to receive the notification. - ServiceURI *string `json:"serviceUri,omitempty"` - // Properties - a property bag of settings. This value can be empty. - Properties map[string]*string `json:"properties"` -} - -// MarshalJSON is the custom marshaler for WebhookNotification. 
-func (wn WebhookNotification) MarshalJSON() ([]byte, error) { - objectMap := make(map[string]interface{}) - if wn.ServiceURI != nil { - objectMap["serviceUri"] = wn.ServiceURI - } - if wn.Properties != nil { - objectMap["properties"] = wn.Properties - } - return json.Marshal(objectMap) -} - -// WebhookReceiver a webhook receiver. -type WebhookReceiver struct { - // Name - The name of the webhook receiver. Names must be unique across all receivers within an action group. - Name *string `json:"name,omitempty"` - // ServiceURI - The URI where webhooks should be sent. - ServiceURI *string `json:"serviceUri,omitempty"` -} - -// WebtestLocationAvailabilityCriteria specifies the metric alert rule criteria for a web test resource. -type WebtestLocationAvailabilityCriteria struct { - // WebTestID - The Application Insights web test Id. - WebTestID *string `json:"webTestId,omitempty"` - // ComponentID - The Application Insights resource Id. - ComponentID *string `json:"componentId,omitempty"` - // FailedLocationCount - The number of failed locations. - FailedLocationCount *float64 `json:"failedLocationCount,omitempty"` - // AdditionalProperties - Unmatched properties from the message are deserialized this collection - AdditionalProperties map[string]interface{} `json:""` - // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' - OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` -} - -// MarshalJSON is the custom marshaler for WebtestLocationAvailabilityCriteria. -func (wlac WebtestLocationAvailabilityCriteria) MarshalJSON() ([]byte, error) { - wlac.OdataType = OdataTypeMicrosoftAzureMonitorWebtestLocationAvailabilityCriteria - objectMap := make(map[string]interface{}) - if wlac.WebTestID != nil { - objectMap["webTestId"] = wlac.WebTestID - } - if wlac.ComponentID != nil { - objectMap["componentId"] = wlac.ComponentID - } - if wlac.FailedLocationCount != nil { - objectMap["failedLocationCount"] = wlac.FailedLocationCount - } - if wlac.OdataType != "" { - objectMap["odata.type"] = wlac.OdataType - } - for k, v := range wlac.AdditionalProperties { - objectMap[k] = v - } - return json.Marshal(objectMap) -} - -// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for WebtestLocationAvailabilityCriteria. -func (wlac WebtestLocationAvailabilityCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsWebtestLocationAvailabilityCriteria is the BasicMetricAlertCriteria implementation for WebtestLocationAvailabilityCriteria. -func (wlac WebtestLocationAvailabilityCriteria) AsWebtestLocationAvailabilityCriteria() (*WebtestLocationAvailabilityCriteria, bool) { - return &wlac, true -} - -// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for WebtestLocationAvailabilityCriteria. -func (wlac WebtestLocationAvailabilityCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { - return nil, false -} - -// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for WebtestLocationAvailabilityCriteria. 
-func (wlac WebtestLocationAvailabilityCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { - return nil, false -} - -// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for WebtestLocationAvailabilityCriteria. -func (wlac WebtestLocationAvailabilityCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { - return &wlac, true -} - -// UnmarshalJSON is the custom unmarshaler for WebtestLocationAvailabilityCriteria struct. -func (wlac *WebtestLocationAvailabilityCriteria) UnmarshalJSON(body []byte) error { - var m map[string]*json.RawMessage - err := json.Unmarshal(body, &m) - if err != nil { - return err - } - for k, v := range m { - switch k { - case "webTestId": - if v != nil { - var webTestID string - err = json.Unmarshal(*v, &webTestID) - if err != nil { - return err - } - wlac.WebTestID = &webTestID - } - case "componentId": - if v != nil { - var componentID string - err = json.Unmarshal(*v, &componentID) - if err != nil { - return err - } - wlac.ComponentID = &componentID - } - case "failedLocationCount": - if v != nil { - var failedLocationCount float64 - err = json.Unmarshal(*v, &failedLocationCount) - if err != nil { - return err - } - wlac.FailedLocationCount = &failedLocationCount - } - default: - if v != nil { - var additionalProperties interface{} - err = json.Unmarshal(*v, &additionalProperties) - if err != nil { - return err - } - if wlac.AdditionalProperties == nil { - wlac.AdditionalProperties = make(map[string]interface{}) - } - wlac.AdditionalProperties[k] = additionalProperties - } - case "odata.type": - if v != nil { - var odataType OdataTypeBasicMetricAlertCriteria - err = json.Unmarshal(*v, &odataType) - if err != nil { - return err - } - wlac.OdataType = odataType - } - } - } - - return nil -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go deleted file mode 100644 index 3f82351c7f2..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go +++ /dev/null @@ -1,98 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// OperationsClient is the monitor Management Client -type OperationsClient struct { - BaseClient -} - -// NewOperationsClient creates an instance of the OperationsClient client. -func NewOperationsClient(subscriptionID string) OperationsClient { - return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this -// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). -func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { - return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List lists all of the available operations from Microsoft.Insights provider. 
-func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListPreparer(ctx) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", resp, "Failure sending request") - return - } - - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", resp, "Failure responding to request") - return - } - - return -} - -// ListPreparer prepares the List request. -func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/microsoft.insights/operations"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go deleted file mode 100644 index c9d43964ecb..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go +++ /dev/null @@ -1,511 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/autorest/validation" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// ScheduledQueryRulesClient is the monitor Management Client -type ScheduledQueryRulesClient struct { - BaseClient -} - -// NewScheduledQueryRulesClient creates an instance of the ScheduledQueryRulesClient client. 
-func NewScheduledQueryRulesClient(subscriptionID string) ScheduledQueryRulesClient { - return NewScheduledQueryRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewScheduledQueryRulesClientWithBaseURI creates an instance of the ScheduledQueryRulesClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). -func NewScheduledQueryRulesClientWithBaseURI(baseURI string, subscriptionID string) ScheduledQueryRulesClient { - return ScheduledQueryRulesClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// CreateOrUpdate creates or updates an log search rule. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// parameters - the parameters of the rule to create or update. -func (client ScheduledQueryRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResource) (result LogSearchRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.CreateOrUpdate") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - if err := validation.Validate([]validation.Validation{ - {TargetValue: parameters, - Constraints: []validation.Constraint{{Target: "parameters.LogSearchRule", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source", Name: validation.Null, Rule: true, - Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source.DataSourceID", Name: validation.Null, Rule: true, Chain: nil}}}, - {Target: "parameters.LogSearchRule.Schedule", Name: validation.Null, Rule: false, - Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Schedule.FrequencyInMinutes", Name: validation.Null, Rule: true, Chain: nil}, - {Target: "parameters.LogSearchRule.Schedule.TimeWindowInMinutes", Name: validation.Null, Rule: true, Chain: nil}, - }}, - }}}}}); err != nil { - return result, validation.NewError("insights.ScheduledQueryRulesClient", "CreateOrUpdate", err.Error()) - } - - req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", nil, "Failure preparing request") - return - } - - resp, err := client.CreateOrUpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", resp, "Failure sending request") - return - } - - result, err = client.CreateOrUpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", resp, "Failure responding to request") - return - } - - return -} - -// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
-func (client ScheduledQueryRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResource) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPut(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) CreateOrUpdateResponder(resp *http.Response) (result LogSearchRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Delete deletes a Log Search rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client ScheduledQueryRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Delete") - defer func() { - sc := -1 - if result.Response != nil { - sc = result.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", nil, "Failure preparing request") - return - } - - resp, err := client.DeleteSender(req) - if err != nil { - result.Response = resp - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", resp, "Failure sending request") - return - } - - result, err = client.DeleteResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", resp, "Failure responding to request") - return - } - - return -} - -// DeletePreparer prepares the Delete request. 
-func (client ScheduledQueryRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsDelete(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// DeleteSender sends the Delete request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// DeleteResponder handles the response to the Delete request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), - autorest.ByClosing()) - result.Response = resp - return -} - -// Get gets an Log Search rule -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -func (client ScheduledQueryRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result LogSearchRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Get") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.GetPreparer(ctx, resourceGroupName, ruleName) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", nil, "Failure preparing request") - return - } - - resp, err := client.GetSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", resp, "Failure sending request") - return - } - - result, err = client.GetResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", resp, "Failure responding to request") - return - } - - return -} - -// GetPreparer prepares the Get request. 
-func (client ScheduledQueryRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// GetSender sends the Get request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// GetResponder handles the response to the Get request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) GetResponder(resp *http.Response) (result LogSearchRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListByResourceGroup list the Log Search rules within a resource group. -// Parameters: -// resourceGroupName - the name of the resource group. -// filter - the filter to apply on the operation. For more information please see -// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx -func (client ScheduledQueryRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result LogSearchRuleResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListByResourceGroup") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", nil, "Failure preparing request") - return - } - - resp, err := client.ListByResourceGroupSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", resp, "Failure sending request") - return - } - - result, err = client.ListByResourceGroupResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", resp, "Failure responding to request") - return - } - - return -} - -// ListByResourceGroupPreparer prepares the ListByResourceGroup request. 
-func (client ScheduledQueryRulesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) ListByResourceGroupResponder(resp *http.Response) (result LogSearchRuleResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// ListBySubscription list the Log Search rules within a subscription group. -// Parameters: -// filter - the filter to apply on the operation. For more information please see -// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx -func (client ScheduledQueryRulesClient) ListBySubscription(ctx context.Context, filter string) (result LogSearchRuleResourceCollection, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListBySubscription") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.ListBySubscriptionPreparer(ctx, filter) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", nil, "Failure preparing request") - return - } - - resp, err := client.ListBySubscriptionSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", resp, "Failure sending request") - return - } - - result, err = client.ListBySubscriptionResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", resp, "Failure responding to request") - return - } - - return -} - -// ListBySubscriptionPreparer prepares the ListBySubscription request. 
-func (client ScheduledQueryRulesClient) ListBySubscriptionPreparer(ctx context.Context, filter string) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/scheduledQueryRules", pathParameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListBySubscriptionSender sends the ListBySubscription request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) ListBySubscriptionResponder(resp *http.Response) (result LogSearchRuleResourceCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// Update update log search Rule. -// Parameters: -// resourceGroupName - the name of the resource group. -// ruleName - the name of the rule. -// parameters - the parameters of the rule to update. -func (client ScheduledQueryRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResourcePatch) (result LogSearchRuleResource, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Update") - defer func() { - sc := -1 - if result.Response.Response != nil { - sc = result.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", nil, "Failure preparing request") - return - } - - resp, err := client.UpdateSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", resp, "Failure sending request") - return - } - - result, err = client.UpdateResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", resp, "Failure responding to request") - return - } - - return -} - -// UpdatePreparer prepares the Update request. 
-func (client ScheduledQueryRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResourcePatch) (*http.Request, error) { - pathParameters := map[string]interface{}{ - "resourceGroupName": autorest.Encode("path", resourceGroupName), - "ruleName": autorest.Encode("path", ruleName), - "subscriptionId": autorest.Encode("path", client.SubscriptionID), - } - - const APIVersion = "2018-04-16" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsContentType("application/json; charset=utf-8"), - autorest.AsPatch(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), - autorest.WithJSON(parameters), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// UpdateSender sends the Update request. The method will close the -// http.Response Body if it receives an error. -func (client ScheduledQueryRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { - return client.Send(req, azure.DoRetryWithRegistration(client.Client)) -} - -// UpdateResponder handles the response to the Update request. The method always -// closes the http.Response Body. -func (client ScheduledQueryRulesClient) UpdateResponder(resp *http.Response) (result LogSearchRuleResource, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go deleted file mode 100644 index 7501f047bb3..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go +++ /dev/null @@ -1,168 +0,0 @@ -package insights - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/Azure/go-autorest/tracing" - "net/http" -) - -// TenantActivityLogsClient is the monitor Management Client -type TenantActivityLogsClient struct { - BaseClient -} - -// NewTenantActivityLogsClient creates an instance of the TenantActivityLogsClient client. -func NewTenantActivityLogsClient(subscriptionID string) TenantActivityLogsClient { - return NewTenantActivityLogsClientWithBaseURI(DefaultBaseURI, subscriptionID) -} - -// NewTenantActivityLogsClientWithBaseURI creates an instance of the TenantActivityLogsClient client using a custom -// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure -// stack). 
-func NewTenantActivityLogsClientWithBaseURI(baseURI string, subscriptionID string) TenantActivityLogsClient { - return TenantActivityLogsClient{NewWithBaseURI(baseURI, subscriptionID)} -} - -// List gets the Activity Logs for the Tenant.
Everything that is applicable to the API to get the Activity Logs for -// the subscription is applicable to this API (the parameters, $filter, etc.).
One thing to point out here is that -// this API does *not* retrieve the logs at the individual subscription of the tenant but only surfaces the logs that -// were generated at the tenant level. -// Parameters: -// filter - reduces the set of data collected.
The **$filter** is very restricted and allows only the -// following patterns.
- List events for a resource group: $filter=eventTimestamp ge '<start time>' and
-// eventTimestamp le '<end time>' and eventChannels eq 'Admin, Operation' and resourceGroupName eq
-// '<resource group name>'.
- List events for resource: $filter=eventTimestamp ge '<start time>' and
-// eventTimestamp le '<end time>' and eventChannels eq 'Admin, Operation' and resourceUri eq
-// '<resource URI>'.
- List events for a subscription: $filter=eventTimestamp ge '<start time>' and
-// eventTimestamp le '<end time>' and eventChannels eq 'Admin, Operation'.
- List events for a resource
-// provider: $filter=eventTimestamp ge '<start time>' and eventTimestamp le '<end time>' and eventChannels eq
-// 'Admin, Operation' and resourceProvider eq '<resource provider name>'.
- List events for a correlation Id:
-// api-version=2014-04-01&$filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and eventTimestamp le
-// '2014-07-20T04:36:37.6407898Z' and eventChannels eq 'Admin, Operation' and correlationId eq
-// '<correlation ID>'.
**NOTE**: No other syntax is allowed. -// selectParameter - used to fetch events with only the given properties.
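For reviewers tracing what this removal drops: the $filter contract documented above can be exercised with ordinary string formatting. A minimal sketch, assuming a hypothetical resource group name and time window (the helper and values below are illustrative, not part of the patch):

package main

import (
	"fmt"
	"time"
)

// buildTenantActivityLogFilter assembles a $filter string following the
// "events for a resource group" pattern documented above. The field names
// (eventTimestamp, eventChannels, resourceGroupName) come from the comment
// block being removed; the concrete values are hypothetical.
func buildTenantActivityLogFilter(start, end time.Time, resourceGroup string) string {
	return fmt.Sprintf(
		"eventTimestamp ge '%s' and eventTimestamp le '%s' and eventChannels eq 'Admin, Operation' and resourceGroupName eq '%s'",
		start.Format(time.RFC3339Nano), end.Format(time.RFC3339Nano), resourceGroup)
}

func main() {
	end := time.Now().UTC()
	start := end.Add(-24 * time.Hour)
	// The result would be passed as the filter argument of TenantActivityLogsClient.List.
	fmt.Println(buildTenantActivityLogFilter(start, end, "my-resource-group"))
}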
The **$select** argument is a -// comma separated list of property names to be returned. Possible values are: *authorization*, *claims*, -// *correlationId*, *description*, *eventDataId*, *eventName*, *eventTimestamp*, *httpRequest*, *level*, -// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*, -// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId* -func (client TenantActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List") - defer func() { - sc := -1 - if result.edc.Response.Response != nil { - sc = result.edc.Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.fn = client.listNextResults - req, err := client.ListPreparer(ctx, filter, selectParameter) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", nil, "Failure preparing request") - return - } - - resp, err := client.ListSender(req) - if err != nil { - result.edc.Response = autorest.Response{Response: resp} - err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", resp, "Failure sending request") - return - } - - result.edc, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", resp, "Failure responding to request") - return - } - if result.edc.hasNextLink() && result.edc.IsEmpty() { - err = result.NextWithContext(ctx) - return - } - - return -} - -// ListPreparer prepares the List request. -func (client TenantActivityLogsClient) ListPreparer(ctx context.Context, filter string, selectParameter string) (*http.Request, error) { - const APIVersion = "2015-04-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - if len(filter) > 0 { - queryParameters["$filter"] = autorest.Encode("query", filter) - } - if len(selectParameter) > 0 { - queryParameters["$select"] = autorest.Encode("query", selectParameter) - } - - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(client.BaseURI), - autorest.WithPath("/providers/microsoft.insights/eventtypes/management/values"), - autorest.WithQueryParameters(queryParameters)) - return preparer.Prepare((&http.Request{}).WithContext(ctx)) -} - -// ListSender sends the List request. The method will close the -// http.Response Body if it receives an error. -func (client TenantActivityLogsClient) ListSender(req *http.Request) (*http.Response, error) { - return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) -} - -// ListResponder handles the response to the List request. The method always -// closes the http.Response Body. -func (client TenantActivityLogsClient) ListResponder(resp *http.Response) (result EventDataCollection, err error) { - err = autorest.Respond( - resp, - azure.WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&result), - autorest.ByClosing()) - result.Response = autorest.Response{Response: resp} - return -} - -// listNextResults retrieves the next set of results, if any. 
-func (client TenantActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) { - req, err := lastResults.eventDataCollectionPreparer(ctx) - if err != nil { - return result, autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", nil, "Failure preparing next results request") - } - if req == nil { - return - } - resp, err := client.ListSender(req) - if err != nil { - result.Response = autorest.Response{Response: resp} - return result, autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", resp, "Failure sending next results request") - } - result, err = client.ListResponder(resp) - if err != nil { - err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", resp, "Failure responding to next results request") - } - return -} - -// ListComplete enumerates all values, automatically crossing page boundaries as required. -func (client TenantActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) { - if tracing.IsEnabled() { - ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List") - defer func() { - sc := -1 - if result.Response().Response.Response != nil { - sc = result.page.Response().Response.Response.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - } - result.page, err = client.List(ctx, filter, selectParameter) - return -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go deleted file mode 100644 index f18278031f7..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/version.go +++ /dev/null @@ -1,19 +0,0 @@ -package insights - -import "github.com/Azure/azure-sdk-for-go/version" - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/" + Version() + " insights/2018-03-01" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return version.Number -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go deleted file mode 100644 index 25fe6842215..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go +++ /dev/null @@ -1,65 +0,0 @@ -package azblob - -import ( - "time" -) - -// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set. -type ModifiedAccessConditions struct { - IfModifiedSince time.Time - IfUnmodifiedSince time.Time - IfMatch ETag - IfNoneMatch ETag -} - -// pointers is for internal infrastructure. It returns the fields as pointers. 
-func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) { - if !ac.IfModifiedSince.IsZero() { - ims = &ac.IfModifiedSince - } - if !ac.IfUnmodifiedSince.IsZero() { - ius = &ac.IfUnmodifiedSince - } - if ac.IfMatch != ETagNone { - ime = &ac.IfMatch - } - if ac.IfNoneMatch != ETagNone { - inme = &ac.IfNoneMatch - } - return -} - -// ContainerAccessConditions identifies container-specific access conditions which you optionally set. -type ContainerAccessConditions struct { - ModifiedAccessConditions - LeaseAccessConditions -} - -// BlobAccessConditions identifies blob-specific access conditions which you optionally set. -type BlobAccessConditions struct { - ModifiedAccessConditions - LeaseAccessConditions -} - -// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set. -type LeaseAccessConditions struct { - LeaseID string -} - -// pointers is for internal infrastructure. It returns the fields as pointers. -func (ac LeaseAccessConditions) pointers() (leaseID *string) { - if ac.LeaseID != "" { - leaseID = &ac.LeaseID - } - return -} - -/* -// getInt32 is for internal infrastructure. It is used with access condition values where -// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header -// and the privately-storage field in the access condition object is stored as +1 higher than desired. -// THis method returns true, if the value is > 0 (explicitly set) and the stored value - 1 (the set desired value). -func getInt32(value int32) (bool, int32) { - return value > 0, value - 1 -} -*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go deleted file mode 100644 index 8d82ebe8c6a..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go +++ /dev/null @@ -1,24 +0,0 @@ -package azblob - -import ( - "errors" -) - -type bytesWriter []byte - -func newBytesWriter(b []byte) bytesWriter { - return b -} - -func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { - if off >= int64(len(c)) || off < 0 { - return 0, errors.New("Offset value is out of range") - } - - n := copy(c[int(off):], b) - if n < len(b) { - return n, errors.New("Not enough space for all bytes") - } - - return n, nil -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go deleted file mode 100644 index 6beb80f8800..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go +++ /dev/null @@ -1,215 +0,0 @@ -package azblob - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - - guuid "github.com/google/uuid" -) - -// blockWriter provides methods to upload blocks that represent a file to a server and commit them. -// This allows us to provide a local implementation that fakes the server for hermetic testing. 
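// A hedged sketch of optimistic concurrency with the access-condition types
// above: remember the ETag from a read and only overwrite while it still
// matches (the service rejects the write with 412 Precondition Failed
// otherwise). blockBlobURL is assumed to be a prepared azblob.BlockBlobURL;
// replaceIfUnchanged is an illustrative name.
func replaceIfUnchanged(ctx context.Context, blockBlobURL azblob.BlockBlobURL, newText string) error {
	get, err := blockBlobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return err
	}
	ac := azblob.BlobAccessConditions{
		ModifiedAccessConditions: azblob.ModifiedAccessConditions{IfMatch: get.ETag()},
	}
	_, err = blockBlobURL.Upload(ctx, strings.NewReader(newText),
		azblob.BlobHTTPHeaders{}, azblob.Metadata{}, ac, azblob.AccessTierNone,
		nil, azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
	return err
}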
-type blockWriter interface { - StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte, ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) - CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap, ClientProvidedKeyOptions, ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) -} - -// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. -// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably -// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The -// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload -// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works -// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can -// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). -// We can even provide a utility to dial this number in for customer networks to optimize their copies. -func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) { - if err := o.defaults(); err != nil { - return nil, err - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - cp := &copier{ - ctx: ctx, - cancel: cancel, - reader: from, - to: to, - id: newID(), - o: o, - errCh: make(chan error, 1), - } - - // Send all our chunks until we get an error. - var err error - for { - if err = cp.sendChunk(); err != nil { - break - } - } - // If the error is not EOF, then we have a problem. - if err != nil && !errors.Is(err, io.EOF) { - cp.wg.Wait() - return nil, err - } - - // Close out our upload. - if err := cp.close(); err != nil { - return nil, err - } - - return cp.result, nil -} - -// copier streams a file via chunks in parallel from a reader representing a file. -// Do not use directly, instead use copyFromReader(). -type copier struct { - // ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case, - // the copier has the lifetime of a function call, so its fine. - ctx context.Context - cancel context.CancelFunc - - // o contains our options for uploading. - o UploadStreamToBlockBlobOptions - - // id provides the ids for each chunk. - id *id - - // reader is the source to be written to storage. - reader io.Reader - // to is the location we are writing our chunks to. - to blockWriter - - // errCh is used to hold the first error from our concurrent writers. - errCh chan error - // wg provides a count of how many writers we are waiting to finish. - wg sync.WaitGroup - - // result holds the final result from blob storage after we have submitted all chunks. - result *BlockBlobCommitBlockListResponse -} - -type copierChunk struct { - buffer []byte - id string - length int -} - -// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error -// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier. 
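// A hedged sketch of the hermetic-testing idea the blockWriter comment names:
// the interface is unexported, so a fake has to live inside package azblob.
// This one records staged blocks instead of calling the service; the
// fakeBlockWriter name and its fields are illustrative.
type fakeBlockWriter struct {
	mu        sync.Mutex
	staged    map[string][]byte // block ID -> staged bytes
	committed []string          // block IDs in commit order
}

func (f *fakeBlockWriter) StageBlock(_ context.Context, id string, body io.ReadSeeker, _ LeaseAccessConditions, _ []byte, _ ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) {
	b, err := io.ReadAll(body)
	if err != nil {
		return nil, err
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.staged == nil {
		f.staged = make(map[string][]byte)
	}
	f.staged[id] = b
	return &BlockBlobStageBlockResponse{}, nil
}

func (f *fakeBlockWriter) CommitBlockList(_ context.Context, ids []string, _ BlobHTTPHeaders, _ Metadata, _ BlobAccessConditions, _ AccessTierType, _ BlobTagsMap, _ ClientProvidedKeyOptions, _ ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.committed = append(f.committed, ids...)
	return &BlockBlobCommitBlockListResponse{}, nil
}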
-func (c *copier) getErr() error { - select { - case err := <-c.errCh: - return err - default: - } - return c.ctx.Err() -} - -// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel. -// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF. -func (c *copier) sendChunk() error { - if err := c.getErr(); err != nil { - return err - } - - buffer := c.o.TransferManager.Get() - if len(buffer) == 0 { - return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") - } - - n, err := io.ReadFull(c.reader, buffer) - if n > 0 { - // Some data was read, schedule the write. - id := c.id.next() - c.wg.Add(1) - c.o.TransferManager.Run( - func() { - defer c.wg.Done() - c.write(copierChunk{buffer: buffer, id: id, length: n}) - }, - ) - } else { - // Return the unused buffer to the manager. - c.o.TransferManager.Put(buffer) - } - - if err == nil { - return nil - } else if err == io.EOF || err == io.ErrUnexpectedEOF { - return io.EOF - } - - if cerr := c.getErr(); cerr != nil { - return cerr - } - - return err -} - -// write uploads a chunk to blob storage. -func (c *copier) write(chunk copierChunk) { - defer c.o.TransferManager.Put(chunk.buffer) - - if err := c.ctx.Err(); err != nil { - return - } - - _, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer[:chunk.length]), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions) - if err != nil { - c.errCh <- fmt.Errorf("write error: %w", err) - return - } -} - -// close commits our blocks to blob storage and closes our writer. -func (c *copier) close() error { - c.wg.Wait() - - if err := c.getErr(); err != nil { - return err - } - - var err error - c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap, c.o.ClientProvidedKeyOptions, c.o.ImmutabilityPolicyOptions) - return err -} - -// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. -type id struct { - u [64]byte - num uint32 - all []string -} - -// newID constructs a new id. -func newID() *id { - uu := guuid.New() - u := [64]byte{} - copy(u[:], uu[:]) - return &id{u: u} -} - -// next returns the next ID. -func (id *id) next() string { - defer atomic.AddUint32(&id.num, 1) - - binary.BigEndian.PutUint32(id.u[len(guuid.UUID{}):], atomic.LoadUint32(&id.num)) - str := base64.StdEncoding.EncodeToString(id.u[:]) - id.all = append(id.all, str) - - return str -} - -// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return. -// The value is only valid until the next time next() is called. 
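// A standalone sketch of the ID scheme above: base64 over a fixed 64-byte
// buffer (a UUID prefix followed by a big-endian counter), so every block ID
// comes out the same length, which Put Block List requires. blockID is an
// illustrative name, not part of this package.
func blockID(prefix guuid.UUID, n uint32) string {
	var buf [64]byte
	copy(buf[:], prefix[:])
	binary.BigEndian.PutUint32(buf[len(prefix):], n)
	return base64.StdEncoding.EncodeToString(buf[:])
}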
-func (id *id) issued() []string { - return id.all -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go deleted file mode 100644 index 18c3c2655da..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go +++ /dev/null @@ -1 +0,0 @@ -package azblob diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go deleted file mode 100644 index 1a6da586fd5..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go +++ /dev/null @@ -1,569 +0,0 @@ -package azblob - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "net/http" - "os" - "sync" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// CommonResponse returns the headers common to all blob REST API responses. -type CommonResponse interface { - // ETag returns the value for header ETag. - ETag() ETag - - // LastModified returns the value for header Last-Modified. - LastModified() time.Time - - // RequestID returns the value for header x-ms-request-id. - RequestID() string - - // Date returns the value for header Date. - Date() time.Time - - // Version returns the value for header x-ms-version. - Version() string - - // Response returns the raw HTTP response object. - Response() *http.Response -} - -// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. -type UploadToBlockBlobOptions struct { - // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. - // Note that the progress reporting is not always increasing; it can go down when retrying a request. - Progress pipeline.ProgressReceiver - - // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. - BlobHTTPHeaders BlobHTTPHeaders - - // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. - Metadata Metadata - - // AccessConditions indicates the access conditions for the block blob. - AccessConditions BlobAccessConditions - - // BlobAccessTier indicates the tier of blob - BlobAccessTier AccessTierType - - // BlobTagsMap - BlobTagsMap BlobTagsMap - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - ClientProvidedKeyOptions ClientProvidedKeyOptions - - // ImmutabilityPolicyOptions indicates a immutability policy or legal hold to be placed upon finishing upload. - // A container with object-level immutability enabled is required. - ImmutabilityPolicyOptions ImmutabilityPolicyOptions - - // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) - Parallelism uint16 -} - -// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. 
-func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, - blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { - if o.BlockSize == 0 { - // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error - if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { - return nil, errors.New("buffer is too large to upload to a block blob") - } - // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= BlockBlobMaxUploadBlobBytes { - o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified - } else { - o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB - o.BlockSize = BlobDefaultDownloadBlockSize - } - // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). - } - } - - if readerSize <= BlockBlobMaxUploadBlobBytes { - // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) - if o.Progress != nil { - body = pipeline.NewRequestBodyProgress(body, o.Progress) - } - return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions) - } - - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) - - blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs - progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "uploadReaderAtToBlockBlob", - TransferSize: readerSize, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(offset int64, count int64, ctx context.Context) error { - // This function is called once per block. - // It is passed this block's offset within the buffer and its count of bytes - // Prepare to read the proper block/section of the buffer - var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) - blockNum := offset / o.BlockSize - if o.Progress != nil { - blockProgress := int64(0) - body = pipeline.NewRequestBodyProgress(body, - func(bytesTransferred int64) { - diff := bytesTransferred - blockProgress - blockProgress = bytesTransferred - progressLock.Lock() // 1 goroutine at a time gets a progress report - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks - // at the same time causing PutBlockList to get a mix of blocks from all the clients. - blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) - _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions) - return err - }, - }) - if err != nil { - return nil, err - } - // All put blocks were successful, call Put Block List to finalize the blob - return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions) -} - -// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. 
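// A worked sketch of the sizing rule above, with the package limits passed in
// as parameters: a 1 TiB reader spread across the 50,000-block maximum needs
// roughly 21 MiB blocks, and anything below the 4 MiB floor rounds up to it.
// (The real code additionally rejects readers larger than maxBlocks times the
// maximum stage-block size.) pickBlockSize is an illustrative name.
func pickBlockSize(readerSize, maxSingleShot, maxBlocks, floor int64) int64 {
	if readerSize <= maxSingleShot {
		return maxSingleShot // one Upload call; no block splitting needed
	}
	blockSize := readerSize / maxBlocks // spread the data over all allowed blocks
	if blockSize < floor {
		blockSize = floor
	}
	return blockSize
}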
-func UploadBufferToBlockBlob(ctx context.Context, b []byte, - blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { - return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o) -} - -// UploadFileToBlockBlob uploads a file in blocks to a block blob. -func UploadFileToBlockBlob(ctx context.Context, file *os.File, - blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { - - stat, err := file.Stat() - if err != nil { - return nil, err - } - return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o) -} - -/////////////////////////////////////////////////////////////////////////////// - -const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB - -// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions. -type DownloadFromBlobOptions struct { - // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. - BlockSize int64 - - // Progress is a function that is invoked periodically as bytes are received. - Progress pipeline.ProgressReceiver - - // AccessConditions indicates the access conditions used when making HTTP GET requests against the blob. - AccessConditions BlobAccessConditions - - // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. - ClientProvidedKeyOptions ClientProvidedKeyOptions - - // Parallelism indicates the maximum number of blocks to download in parallel (0=default) - Parallelism uint16 - - // RetryReaderOptionsPerBlock is used when downloading each block. - RetryReaderOptionsPerBlock RetryReaderOptions -} - -// downloadBlobToWriterAt downloads an Azure blob to a buffer with parallel. -func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64, - writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error { - if o.BlockSize == 0 { - o.BlockSize = BlobDefaultDownloadBlockSize - } - - if count == CountToEnd { // If size not specified, calculate it - if initialDownloadResponse != nil { - count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it - } else { - // If we don't have the length at all, get it - dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions) - if err != nil { - return err - } - count = dr.ContentLength() - offset - } - } - - if count <= 0 { - // The file is empty, there is nothing to download. - return nil - } - - // Prepare and do parallel download. 
- progress := int64(0) - progressLock := &sync.Mutex{} - - err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - Parallelism: o.Parallelism, - Operation: func(chunkStart int64, count int64, ctx context.Context) error { - dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions) - if err != nil { - return err - } - body := dr.Body(o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = pipeline.NewResponseBodyProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body) - body.Close() - return err - }, - }) - if err != nil { - return err - } - return nil -} - -// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel. -// Offset and count are optional, pass 0 for both to download the entire blob. -func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, - b []byte, o DownloadFromBlobOptions) error { - return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil) -} - -// DownloadBlobToFile downloads an Azure blob to a local file. -// The file would be truncated if the size doesn't match. -// Offset and count are optional, pass 0 for both to download the entire blob. -func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64, - file *os.File, o DownloadFromBlobOptions) error { - // 1. Calculate the size of the destination file - var size int64 - - if count == CountToEnd { - // Try to get Azure blob's size - props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions) - if err != nil { - return err - } - size = props.ContentLength() - offset - } else { - size = count - } - - // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. - stat, err := file.Stat() - if err != nil { - return err - } - if stat.Size() != size { - if err = file.Truncate(size); err != nil { - return err - } - } - - if size > 0 { - return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil) - } else { // if the blob's size is 0, there is no need in downloading it - return nil - } -} - -/////////////////////////////////////////////////////////////////////////////// - -// BatchTransferOptions identifies options used by DoBatchTransfer. -type BatchTransferOptions struct { - TransferSize int64 - ChunkSize int64 - Parallelism uint16 - Operation func(offset int64, chunkSize int64, ctx context.Context) error - OperationName string -} - -// DoBatchTransfer helps to execute operations in a batch manner. -// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) -func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { - if o.ChunkSize == 0 { - return errors.New("ChunkSize cannot be 0") - } - - if o.Parallelism == 0 { - o.Parallelism = 5 // default Parallelism - } - - // Prepare and do parallel operations. 
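// A usage sketch for DownloadBlobToFile above, assuming a prepared
// azblob.BlobURL: passing CountToEnd lets the helper size (and truncate)
// the local file from the blob's properties. fetchBlob is an illustrative name.
func fetchBlob(ctx context.Context, blobURL azblob.BlobURL, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return azblob.DownloadBlobToFile(ctx, blobURL, 0, azblob.CountToEnd, f,
		azblob.DownloadFromBlobOptions{Parallelism: 4}) // four ranged GETs in flight
}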
- numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) - operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create the goroutines that process each operation (in parallel). - for g := uint16(0); g < o.Parallelism; g++ { - //grIndex := g - go func() { - for f := range operationChannel { - err := f() - operationResponseChannel <- err - } - }() - } - - // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - curChunkSize := o.ChunkSize - - if chunkNum == numChunks-1 { // Last chunk - curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total - } - offset := int64(chunkNum) * o.ChunkSize - - operationChannel <- func() error { - return o.Operation(offset, curChunkSize, ctx) - } - } - close(operationChannel) - - // Wait for the operations to complete. - var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { - responseError := <-operationResponseChannel - // record the first error (the original error which should cause the other chunks to fail with canceled context) - if responseError != nil && firstErr == nil { - cancel() // As soon as any operation fails, cancel all remaining operation calls - firstErr = responseError - } - } - return firstErr -} - -//////////////////////////////////////////////////////////////////////////////////////////////// - -// TransferManager provides a buffer and thread pool manager for certain transfer options. -// It is undefined behavior if code outside of this package call any of these methods. -type TransferManager interface { - // Get provides a buffer that will be used to read data into and write out to the stream. - // It is guaranteed by this package to not read or write beyond the size of the slice. - Get() []byte - // Put may or may not put the buffer into underlying storage, depending on settings. - // The buffer must not be touched after this has been called. - Put(b []byte) - // Run will use a goroutine pool entry to run a function. This blocks until a pool - // goroutine becomes available. - Run(func()) - // Closes shuts down all internal goroutines. This must be called when the TransferManager - // will no longer be used. Not closing it will cause a goroutine leak. - Close() -} - -type staticBuffer struct { - buffers chan []byte - size int - threadpool chan func() -} - -// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer -// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This -// can be shared between calls if you wish to control maximum memory and concurrency with -// multiple concurrent calls. -func NewStaticBuffer(size, max int) (TransferManager, error) { - if size < 1 || max < 1 { - return nil, fmt.Errorf("cannot be called with size or max set to < 1") - } - - if size < _1MiB { - return nil, fmt.Errorf("cannot have size < 1MiB") - } - - threadpool := make(chan func(), max) - buffers := make(chan []byte, max) - for i := 0; i < max; i++ { - go func() { - for f := range threadpool { - f() - } - }() - - buffers <- make([]byte, size) - } - return staticBuffer{ - buffers: buffers, - size: size, - threadpool: threadpool, - }, nil -} - -// Get implements TransferManager.Get(). 
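// A hedged sketch of the "customize batch works" idea above: DoBatchTransfer
// can drive any chunked job, not only blob transfers. Here it hands 8 MiB
// windows of an io.ReaderAt to a caller-supplied callback, four at a time;
// forEachChunk and process are illustrative names.
func forEachChunk(ctx context.Context, r io.ReaderAt, size int64, process func(buf []byte, off int64) error) error {
	return azblob.DoBatchTransfer(ctx, azblob.BatchTransferOptions{
		OperationName: "forEachChunk",
		TransferSize:  size,
		ChunkSize:     8 << 20,
		Parallelism:   4,
		Operation: func(offset int64, chunkSize int64, ctx context.Context) error {
			buf := make([]byte, chunkSize) // the final chunk arrives already sized exactly
			if _, err := r.ReadAt(buf, offset); err != nil && err != io.EOF {
				return err
			}
			return process(buf, offset)
		},
	})
}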
-func (s staticBuffer) Get() []byte { - return <-s.buffers -} - -// Put implements TransferManager.Put(). -func (s staticBuffer) Put(b []byte) { - select { - case s.buffers <- b: - default: // This shouldn't happen, but just in case they call Put() with there own buffer. - } -} - -// Run implements TransferManager.Run(). -func (s staticBuffer) Run(f func()) { - s.threadpool <- f -} - -// Close implements TransferManager.Close(). -func (s staticBuffer) Close() { - close(s.threadpool) - close(s.buffers) -} - -type syncPool struct { - threadpool chan func() - pool sync.Pool -} - -// NewSyncPool creates a TransferManager that will use a sync.Pool -// that can hold a non-capped number of buffers constrained by concurrency. This -// can be shared between calls if you wish to share memory and concurrency. -func NewSyncPool(size, concurrency int) (TransferManager, error) { - if size < 1 || concurrency < 1 { - return nil, fmt.Errorf("cannot be called with size or max set to < 1") - } - - if size < _1MiB { - return nil, fmt.Errorf("cannot have size < 1MiB") - } - - threadpool := make(chan func(), concurrency) - for i := 0; i < concurrency; i++ { - go func() { - for f := range threadpool { - f() - } - }() - } - - return &syncPool{ - threadpool: threadpool, - pool: sync.Pool{ - New: func() interface{} { - return make([]byte, size) - }, - }, - }, nil -} - -// Get implements TransferManager.Get(). -func (s *syncPool) Get() []byte { - return s.pool.Get().([]byte) -} - -// Put implements TransferManager.Put(). -func (s *syncPool) Put(b []byte) { - s.pool.Put(b) -} - -// Run implements TransferManager.Run(). -func (s *syncPool) Run(f func()) { - s.threadpool <- f -} - -// Close implements TransferManager.Close(). -func (s *syncPool) Close() { - close(s.threadpool) -} - -const _1MiB = 1024 * 1024 - -// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob. -type UploadStreamToBlockBlobOptions struct { - // TransferManager provides a TransferManager that controls buffer allocation/reuse and - // concurrency. This overrides BufferSize and MaxBuffers if set. - TransferManager TransferManager - transferMangerNotSet bool - // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB. - BufferSize int - // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file. - MaxBuffers int - BlobHTTPHeaders BlobHTTPHeaders - Metadata Metadata - AccessConditions BlobAccessConditions - BlobAccessTier AccessTierType - BlobTagsMap BlobTagsMap - ClientProvidedKeyOptions ClientProvidedKeyOptions - ImmutabilityPolicyOptions ImmutabilityPolicyOptions -} - -func (u *UploadStreamToBlockBlobOptions) defaults() error { - if u.TransferManager != nil { - return nil - } - - if u.MaxBuffers == 0 { - u.MaxBuffers = 1 - } - - if u.BufferSize < _1MiB { - u.BufferSize = _1MiB - } - - var err error - u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers) - if err != nil { - return fmt.Errorf("bug: default transfer manager could not be created: %s", err) - } - u.transferMangerNotSet = true - return nil -} - -// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL. -// A Context deadline or cancellation will cause this to error. -func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) { - if err := o.defaults(); err != nil { - return nil, err - } - - // If we used the default manager, we need to close it. 
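// A usage sketch for the caller-owned-manager case handled just below: when
// the caller supplies its own TransferManager (capping memory at 8 x 4 MiB
// here), closing it is the caller's job, not UploadStreamToBlockBlob's. The
// streamUp name and key parameters are illustrative; the key strings stand in
// for a base64 AES-256 key and its SHA256.
func streamUp(ctx context.Context, src io.Reader, bb azblob.BlockBlobURL, key, keySHA256 string) error {
	tm, err := azblob.NewStaticBuffer(4<<20, 8)
	if err != nil {
		return err
	}
	defer tm.Close() // we own the manager, so we must close it
	_, err = azblob.UploadStreamToBlockBlob(ctx, src, bb, azblob.UploadStreamToBlockBlobOptions{
		TransferManager:          tm,
		ClientProvidedKeyOptions: azblob.NewClientProvidedKeyOptions(&key, &keySHA256, nil),
	})
	return err
}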
- if o.transferMangerNotSet { - defer o.TransferManager.Close() - } - - result, err := copyFromReader(ctx, reader, blockBlobURL, o) - if err != nil { - return nil, err - } - - return result, nil -} - -// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version. -// TODO: Remove on next minor release in v0 or before v1. -type UploadStreamOptions struct { - BufferSize int - MaxBuffers int -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go deleted file mode 100644 index 1cb90dc95df..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go +++ /dev/null @@ -1,56 +0,0 @@ -package azblob - -import ( - "time" -) - -// ClientProvidedKeyOptions contains headers which may be be specified from service version 2019-02-02 -// or higher to encrypts the data on the service-side with the given key. Use of customer-provided keys -// must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection -// must be established to transfer the key. -// Note: Azure Storage does not store or manage customer provided encryption keys. Keys are securely discarded -// as soon as possible after they’ve been used to encrypt or decrypt the blob data. -// https://docs.microsoft.com/en-us/azure/storage/common/storage-service-encryption -// https://docs.microsoft.com/en-us/azure/storage/common/customer-managed-keys-overview -type ClientProvidedKeyOptions struct { - // A Base64-encoded AES-256 encryption key value. - EncryptionKey *string - - // The Base64-encoded SHA256 of the encryption key. - EncryptionKeySha256 *string - - // Specifies the algorithm to use when encrypting data using the given key. Must be AES256. - EncryptionAlgorithm EncryptionAlgorithmType - - // Specifies the name of the encryption scope to use to encrypt the data provided in the request - // https://docs.microsoft.com/en-us/azure/storage/blobs/encryption-scope-overview - // https://docs.microsoft.com/en-us/azure/key-vault/general/overview - EncryptionScope *string -} - -// NewClientProvidedKeyOptions function. -// By default the value of encryption algorithm params is "AES256" for service version 2019-02-02 or higher. -func NewClientProvidedKeyOptions(ek *string, eksha256 *string, es *string) (cpk ClientProvidedKeyOptions) { - cpk = ClientProvidedKeyOptions{} - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, cpk.EncryptionScope = ek, eksha256, EncryptionAlgorithmAES256, es - return cpk -} - -type ImmutabilityPolicyOptions struct { - // A container with object-level immutability enabled is required for any options. - // Both ImmutabilityPolicy options must be filled to set an immutability policy. 
- ImmutabilityPolicyUntilDate *time.Time - ImmutabilityPolicyMode BlobImmutabilityPolicyModeType - - LegalHold *bool -} - -func NewImmutabilityPolicyOptions(untilDate *time.Time, policyMode BlobImmutabilityPolicyModeType, legalHold *bool) ImmutabilityPolicyOptions { - opt := ImmutabilityPolicyOptions{} - opt.ImmutabilityPolicyUntilDate, opt.ImmutabilityPolicyMode, opt.LegalHold = untilDate, policyMode, legalHold - return opt -} - -func (pol *ImmutabilityPolicyOptions) pointers() (*time.Time, BlobImmutabilityPolicyModeType, *bool) { - return pol.ImmutabilityPolicyUntilDate, pol.ImmutabilityPolicyMode, pol.LegalHold -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go deleted file mode 100644 index 2719b7366a7..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go +++ /dev/null @@ -1,371 +0,0 @@ -package azblob - -import ( - "bytes" - "fmt" - "strings" - "time" -) - -// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas -type BlobSASSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to SASVersion - Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - SnapshotTime time.Time - Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Identifier string `param:"si"` - ContainerName string - BlobName string // Use "" to create a Container SAS - Directory string // Not nil for a directory SAS (ie sr=d) - CacheControl string // rscc - ContentDisposition string // rscd - ContentEncoding string // rsce - ContentLanguage string // rscl - ContentType string // rsct - BlobVersion string // sr=bv - PreauthorizedAgentObjectId string - AgentObjectId string - CorrelationId string -} - -func getDirectoryDepth(path string) string { - if path == "" { - return "" - } - return fmt.Sprint(strings.Count(path, "/") + 1) -} - -// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce -// the proper SAS query parameters. -// See: StorageAccountCredential. 
Compatible with both UserDelegationCredential and SharedKeyCredential -func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) { - resource := "c" - if credential == nil { - return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential") - } - - if !v.SnapshotTime.IsZero() { - resource = "bs" - //Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - } else if v.BlobVersion != "" { - resource = "bv" - //Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - } else if v.Directory != "" { - resource = "d" - v.BlobName = "" - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - } else if v.BlobName == "" { - // Make sure the permission characters are in the correct order - perms := &ContainerSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - } else { - resource = "b" - // Make sure the permission characters are in the correct order - perms := &BlobSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - } - if v.Version == "" { - v.Version = SASVersion - } - startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) - - signedIdentifier := v.Identifier - - udk := credential.getUDKParams() - - if udk != nil { - udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{}) - //I don't like this answer to combining the functions - //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it. 
- signedIdentifier = strings.Join([]string{ - udk.SignedOid, - udk.SignedTid, - udkStart, - udkExpiry, - udk.SignedService, - udk.SignedVersion, - v.PreauthorizedAgentObjectId, - v.AgentObjectId, - v.CorrelationId, - }, "\n") - } - - // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - stringToSign := strings.Join([]string{ - v.Permissions, - startTime, - expiryTime, - getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName, v.Directory), - signedIdentifier, - v.IPRange.String(), - string(v.Protocol), - v.Version, - resource, - snapshotTime, // signed timestamp - v.CacheControl, // rscc - v.ContentDisposition, // rscd - v.ContentEncoding, // rsce - v.ContentLanguage, // rscl - v.ContentType}, // rsct - "\n") - - signature := "" - signature = credential.ComputeHMACSHA256(stringToSign) - - p := SASQueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - - // Container/Blob-specific SAS parameters - resource: resource, - identifier: v.Identifier, - cacheControl: v.CacheControl, - contentDisposition: v.ContentDisposition, - contentEncoding: v.ContentEncoding, - contentLanguage: v.ContentLanguage, - contentType: v.ContentType, - snapshotTime: v.SnapshotTime, - signedDirectoryDepth: getDirectoryDepth(v.Directory), - preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, - agentObjectId: v.AgentObjectId, - correlationId: v.CorrelationId, - // Calculated SAS signature - signature: signature, - } - - //User delegation SAS specific parameters - if udk != nil { - p.signedOid = udk.SignedOid - p.signedTid = udk.SignedTid - p.signedStart = udk.SignedStart - p.signedExpiry = udk.SignedExpiry - p.signedService = udk.SignedService - p.signedVersion = udk.SignedVersion - } - - return p, nil -} - -// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. -func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { - // Container: "/blob/account/containername" - // Blob: "/blob/account/containername/blobname" - elements := []string{"/blob/", account, "/", containerName} - if blobName != "" { - elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) - } else if directoryName != "" { - elements = append(elements, "/", directoryName) - } - return strings.Join(elements, "") -} - -// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. -// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob -type ContainerSASPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, Immutability bool - Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only -} - -// String produces the SAS permissions string for an Azure Storage container. -// Call this method to set BlobSASSignatureValues's Permissions field. 
-func (p ContainerSASPermissions) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.DeletePreviousVersion { - b.WriteRune('x') - } - if p.List { - b.WriteRune('l') - } - if p.Tag { - b.WriteRune('t') - } - if p.Execute { - b.WriteRune('e') - } - if p.ModifyOwnership { - b.WriteRune('o') - } - if p.ModifyPermissions { - b.WriteRune('p') - } - if p.Immutability { - b.WriteRune('i') - } - return b.String() -} - -// Parse initializes the ContainerSASPermissions's fields from a string. -func (p *ContainerSASPermissions) Parse(s string) error { - *p = ContainerSASPermissions{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'x': - p.DeletePreviousVersion = true - case 'l': - p.List = true - case 't': - p.Tag = true - case 'e': - p.Execute = true - case 'o': - p.ModifyOwnership = true - case 'p': - p.ModifyPermissions = true - case 'i': - p.Immutability = true - default: - return fmt.Errorf("invalid permission: '%v'", r) - } - } - return nil -} - -// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. -// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. -type BlobSASPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions, PermanentDelete, Immutability bool -} - -// String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSASSignatureValues's Permissions field. -func (p BlobSASPermissions) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.DeletePreviousVersion { - b.WriteRune('x') - } - if p.Tag { - b.WriteRune('t') - } - if p.List { - b.WriteRune('l') - } - if p.Move { - b.WriteRune('m') - } - if p.Execute { - b.WriteRune('e') - } - if p.Ownership { - b.WriteRune('o') - } - if p.Permissions { - b.WriteRune('p') - } - if p.PermanentDelete { - b.WriteRune('y') - } - if p.Immutability { - b.WriteRune('i') - } - return b.String() -} - -// Parse initializes the BlobSASPermissions's fields from a string. 
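// A hedged end-to-end sketch of the SAS flow above: build the permission
// string with BlobSASPermissions, fill in BlobSASSignatureValues, and sign
// with a SharedKeyCredential (one implementation of StorageAccountCredential).
// Account, container, and blob names are placeholders; readOnlySASURL is an
// illustrative name.
func readOnlySASURL(accountName, accountKey string) (string, error) {
	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return "", err
	}
	qp, err := azblob.BlobSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(4 * time.Hour),
		ContainerName: "mycontainer",
		BlobName:      "report.txt",
		Permissions:   azblob.BlobSASPermissions{Read: true}.String(), // "r"
	}.NewSASQueryParameters(cred)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("https://%s.blob.core.windows.net/mycontainer/report.txt?%s",
		accountName, qp.Encode()), nil
}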
-func (p *BlobSASPermissions) Parse(s string) error {
-	*p = BlobSASPermissions{} // Clear the flags
-	for _, r := range s {
-		switch r {
-		case 'r':
-			p.Read = true
-		case 'a':
-			p.Add = true
-		case 'c':
-			p.Create = true
-		case 'w':
-			p.Write = true
-		case 'd':
-			p.Delete = true
-		case 'x':
-			p.DeletePreviousVersion = true
-		case 't':
-			p.Tag = true
-		case 'l':
-			p.List = true
-		case 'm':
-			p.Move = true
-		case 'e':
-			p.Execute = true
-		case 'o':
-			p.Ownership = true
-		case 'p':
-			p.Permissions = true
-		case 'y':
-			p.PermanentDelete = true
-		case 'i':
-			p.Immutability = true
-		default:
-			return fmt.Errorf("invalid permission: '%v'", r)
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
deleted file mode 100644
index 6d86f6eb9df..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package azblob
-
-import (
-	"errors"
-	"io"
-)
-
-type sectionWriter struct {
-	count    int64
-	offset   int64
-	position int64
-	writerAt io.WriterAt
-}
-
-func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
-	return &sectionWriter{
-		count:    count,
-		offset:   off,
-		writerAt: c,
-	}
-}
-
-func (c *sectionWriter) Write(p []byte) (int, error) {
-	remaining := c.count - c.position
-
-	if remaining <= 0 {
-		return 0, errors.New("End of section reached")
-	}
-
-	slice := p
-
-	if int64(len(slice)) > remaining {
-		slice = slice[:remaining]
-	}
-
-	n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
-	c.position += int64(n)
-	if err != nil {
-		return n, err
-	}
-
-	if len(p) > n {
-		return n, errors.New("Not enough space for all bytes")
-	}
-
-	return n, nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
deleted file mode 100644
index 292710cc349..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package azblob
-
-// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
-
-// ServiceCode values indicate a service failure.
-const (
-	// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
-	ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
-
-	// ServiceCodeBlobAlreadyExists means the specified blob already exists.
-	ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
-
-	// ServiceCodeBlobNotFound means the specified blob does not exist.
-	ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
-
-	// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
-	ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
-
-	// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
-	ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
-
-	// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
-	// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
-	ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
-
-	// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
- ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" - - // ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set. - ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" - - // ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time. - // Examine the HTTP status code and message for more information about the failure. - ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" - - // ServiceCodeContainerAlreadyExists means the specified container already exists. - ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" - - // ServiceCodeContainerBeingDeleted means the specified container is being deleted. - ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" - - // ServiceCodeContainerDisabled means the specified container has been disabled by the administrator. - ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" - - // ServiceCodeContainerNotFound means the specified container does not exist. - ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" - - // ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit. - ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" - - // ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same. - ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" - - // ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation. - ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" - - // ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or - // that the operation for AppendBlob requires at least version 2015-02-21. - ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" - - // ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob. - ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" - - // ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob. - ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch" - - // ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob. - ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" - - // ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot. - ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" - - // ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease. - ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" - - // ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid. 
- ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" - - // ServiceCodeInvalidBlobType means the blob type is invalid for this operation. - ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" - - // ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded. - ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" - - // ServiceCodeInvalidBlockList means the specified block list is invalid. - ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" - - // ServiceCodeInvalidOperation means an invalid operation against a blob snapshot. - ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" - - // ServiceCodeInvalidPageRange means the page range specified is invalid. - ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" - - // ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation. - ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" - - // ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL. - ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" - - // ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19. - ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" - - // ServiceCodeLeaseAlreadyPresent means there is already a lease present. - ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" - - // ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again. - ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" - - // ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob. - ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" - - // ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container. - ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" - - // ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container. - ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" - - // ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request. - ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" - - // ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken. - ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" - - // ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed. - ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" - - // ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed. - ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" - - // ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired. 
- ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" - - // ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob. - ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" - - // ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container. - ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" - - // ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container. - ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" - - // ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met. - ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" - - // ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation. - ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" - - // ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob. - ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" - - // ServiceCodePendingCopyOperation means there is currently a pending copy operation. - ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" - - // ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value. - ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" - - // ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found. - ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" - - // ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot. - ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" - - // ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met. - ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" - - // ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number. - ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" - - // ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded. - ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" - - // ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded. - ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" - - // ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots. - ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" - - // ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met. - ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" - - // ServiceCodeSystemInUse means this blob is in use by the system. 
- ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" - - // ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met. - ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" - - // ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites. - ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" - - // ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated. - ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" - - // ServiceCodeBlobArchived means this operation is not permitted on an archived blob. - ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" - - // ServiceCodeBlobNotArchived means this blob is currently not in the archived state. - ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" -) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go deleted file mode 100644 index b89b18bb411..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go +++ /dev/null @@ -1,8 +0,0 @@ -package azblob - -// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential -type StorageAccountCredential interface { - AccountName() string - ComputeHMACSHA256(message string) (base64String string) - getUDKParams() *UserDelegationKey -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go deleted file mode 100644 index 0fdf038cadf..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go +++ /dev/null @@ -1,161 +0,0 @@ -package azblob - -import ( - "context" - "io" - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -const ( - // AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock. - AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB - - // AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob. - AppendBlobMaxBlocks = 50000 -) - -// AppendBlobURL defines a set of operations applicable to append blobs. -type AppendBlobURL struct { - BlobURL - abClient appendBlobClient -} - -// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline. -func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { - blobClient := newBlobClient(url, p) - abClient := newAppendBlobClient(url, p) - return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} -} - -// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline. -func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { - return NewAppendBlobURL(ab.blobClient.URL(), p) -} - -// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. 
-func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL { - p := NewBlobURLParts(ab.URL()) - p.Snapshot = snapshot - return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) -} - -// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL { - p := NewBlobURLParts(ab.URL()) - p.VersionID = versionId - return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) -} - -func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { - return ab.blobClient.GetAccountInfo(ctx) -} - -// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*AppendBlobCreateResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() - return ab.abClient.Create(ctx, 0, nil, - &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, - &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, - nil, // Blob ifTags - nil, - blobTagsString, // Blob tags - // immutability policy - immutabilityExpiry, immutabilityMode, legalHold, - ) -} - -// AppendBlock writes a stream to a new block of data to the end of the existing append blob. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. -func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers() - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return nil, err - } - return ab.abClient.AppendBlock(ctx, body, count, nil, - transactionalMD5, - nil, // CRC - ac.LeaseAccessConditions.pointers(), - ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. 
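// A minimal consumer-side sketch of the Create/AppendBlock flow above; a hedged
// example, not part of the vendored file. Assumes imports "bytes", "context",
// and this azblob package; the name appendLine is illustrative.
func appendLine(ctx context.Context, ab azblob.AppendBlobURL, line []byte) error {
	// Create the 0-length append blob, then append one block to it.
	_, err := ab.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{},
		azblob.ImmutabilityPolicyOptions{})
	if err != nil {
		return err
	}
	_, err = ab.AppendBlock(ctx, bytes.NewReader(line),
		azblob.AppendBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
	return err
}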
-func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*AppendBlobAppendBlockFromURLResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() - sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() - ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers() - return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(), - transactionalMD5, nil, nil, nil, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - destinationAccessConditions.LeaseAccessConditions.pointers(), - ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization)) -} - -type AppendBlobAccessConditions struct { - ModifiedAccessConditions - LeaseAccessConditions - AppendPositionAccessConditions -} - -// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set. -type AppendPositionAccessConditions struct { - // IfAppendPositionEqual ensures that the AppendBlock operation succeeds - // only if the append position is equal to a value. - // IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified. - // IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value - // IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0 - IfAppendPositionEqual int64 - - // IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds - // only if the append blob's size is less than or equal to a value. - // IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified. - // IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value - // IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0 - IfMaxSizeLessThanOrEqual int64 -} - -// pointers is for internal infrastructure. It returns the fields as pointers. 
-func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) { - var zero int64 // defaults to 0 - switch ac.IfAppendPositionEqual { - case -1: - iape = &zero - case 0: - iape = nil - default: - iape = &ac.IfAppendPositionEqual - } - - switch ac.IfMaxSizeLessThanOrEqual { - case -1: - imsltoe = &zero - case 0: - imsltoe = nil - default: - imsltoe = &ac.IfMaxSizeLessThanOrEqual - } - return -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go deleted file mode 100644 index 301d90825c1..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go +++ /dev/null @@ -1,363 +0,0 @@ -package azblob - -import ( - "context" - "net/url" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. -type BlobURL struct { - blobClient blobClient -} - -type BlobTagsMap map[string]string - -var DefaultAccessTier = AccessTierNone -var DefaultPremiumBlobAccessTier = PremiumPageBlobAccessTierNone - -// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline. -func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL { - blobClient := newBlobClient(url, p) - return BlobURL{blobClient: blobClient} -} - -// URL returns the URL endpoint used by the BlobURL object. -func (b BlobURL) URL() url.URL { - return b.blobClient.URL() -} - -// String returns the URL as a string. -func (b BlobURL) String() string { - u := b.URL() - return u.String() -} - -func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { - return b.blobClient.GetAccountInfo(ctx) -} - -// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline. -func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { - return NewBlobURL(b.blobClient.URL(), p) -} - -// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (b BlobURL) WithSnapshot(snapshot string) BlobURL { - p := NewBlobURLParts(b.URL()) - p.Snapshot = snapshot - return NewBlobURL(p.URL(), b.blobClient.Pipeline()) -} - -// WithVersionID creates a new BlobURL object identical to the source but with the specified version id. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (b BlobURL) WithVersionID(versionID string) BlobURL { - p := NewBlobURLParts(b.URL()) - p.VersionID = versionID - return NewBlobURL(p.URL(), b.blobClient.Pipeline()) -} - -// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline. -func (b BlobURL) ToAppendBlobURL() AppendBlobURL { - return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline()) -} - -// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline. -func (b BlobURL) ToBlockBlobURL() BlockBlobURL { - return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline()) -} - -// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline. 
-func (b BlobURL) ToPageBlobURL() PageBlobURL { - return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) -} - -func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string { - if len(blobTagsMap) == 0 { - return nil - } - tags := make([]string, 0) - for key, val := range blobTagsMap { - tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) - } - //tags = tags[:len(tags)-1] - blobTagsString := strings.Join(tags, "&") - return &blobTagsString -} - -func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags { - if len(blobTagsMap) == 0 { - return BlobTags{} - } - blobTagSet := make([]BlobTag, 0, len(blobTagsMap)) - for key, val := range blobTagsMap { - blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val}) - } - return BlobTags{BlobTagSet: blobTagSet} -} - -// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata. -// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end. -// Note: Snapshot/VersionId are optional parameters which are part of request URL query params. -// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string) -// Therefore it is not required to pass these here. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. -func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) { - var xRangeGetContentMD5 *bool - if rangeGetContentMD5 { - xRangeGetContentMD5 = &rangeGetContentMD5 - } - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - dr, err := b.blobClient.Download(ctx, nil, nil, nil, - httpRange{offset: offset, count: count}.pointers(), - ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) - if err != nil { - return nil, err - } - return &DownloadResponse{ - b: b, - r: dr, - ctx: ctx, - getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, - }, err -} - -// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. -// Note 1: deleting a blob also deletes all its snapshots. -// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params. -// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string) -// Therefore it is not required to pass these here. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil, BlobDeleteNone) -} - -// PermanentDelete permanently deletes soft-deleted snapshots & soft-deleted version blobs; it is a dangerous operation and SHOULD NOT BE USED. -// WARNING: This operation should not be used unless you know exactly the implications. 
We will not provide support for this API. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. -func (b BlobURL) PermanentDelete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil, BlobDeletePermanent) -} - -// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. -// Each call to this operation replaces all existing tags attached to the blob. -// To remove all tags from the blob, call this operation with no tags set. -// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags -func (b BlobURL) SetTags(ctx context.Context, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) { - tags := SerializeBlobTags(blobTagsMap) - return b.blobClient.SetTags(ctx, nil, nil, transactionalContentMD5, transactionalContentCrc64, nil, ifTags, nil, &tags) -} - -// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. -// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags -func (b BlobURL) GetTags(ctx context.Context, ifTags *string) (*BlobTags, error) { - return b.blobClient.GetTags(ctx, nil, nil, nil, nil, ifTags, nil) -} - -// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. -func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) { - return b.blobClient.Undelete(ctx, nil, nil) -} - -// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account -// and on a block blob in a blob storage account (locally redundant storage only). -// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. -// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag. -// Note: VersionId is an optional parameter which is part of request URL query params. -// It can be explicitly set by calling the WithVersionID(versionID string) function, and hence it is not required to pass it here. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. -func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions, rehydratePriority RehydratePriorityType) (*BlobSetTierResponse, error) { - return b.blobClient.SetTier(ctx, tier, nil, - nil, // Blob versioning - nil, rehydratePriority, nil, lac.pointers(), - nil) // Blob ifTags -} - -// GetProperties returns the blob's properties. -// Note: Snapshot/VersionId are optional parameters which are part of request URL query params. -// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string) -// Therefore it is not required to pass these here. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. 
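// A hedged sketch of reading a whole blob with the Download method above:
// CountToEnd (0) asks for everything from the offset, and Body wraps the
// stream in a retrying reader. Assumes imports "context", "io", and this
// azblob package; the name readWholeBlob is illustrative.
func readWholeBlob(ctx context.Context, b azblob.BlobURL) ([]byte, error) {
	resp, err := b.Download(ctx, 0, azblob.CountToEnd,
		azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return nil, err
	}
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	return io.ReadAll(body)
}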
-func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.GetProperties(ctx, nil, - nil, // Blob versioning - nil, ac.LeaseAccessConditions.pointers(), - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// SetHTTPHeaders changes a blob's HTTP headers. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.SetHTTPHeaders(ctx, nil, - &h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage, - ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - &h.ContentDisposition, nil) -} - -// SetMetadata changes a blob's metadata. -// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(), - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// CreateSnapshot creates a read-only snapshot of a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) { - // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter - // because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this - // performance hit. - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.CreateSnapshot(ctx, nil, metadata, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - ac.LeaseAccessConditions.pointers(), nil) -} - -// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between -// 15 to 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
-func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() - return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// RenewLease renews the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() - return b.blobClient.RenewLease(ctx, leaseID, nil, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// ReleaseLease releases the blob's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() - return b.blobClient.ReleaseLease(ctx, leaseID, nil, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakNaturally (-1) -// constant to break a fixed-duration lease when it expires or an infinite lease immediately. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() - return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// ChangeLease changes the blob's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. -func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() - return b.blobClient.ChangeLease(ctx, leaseID, proposedID, - nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics. -const LeaseBreakNaturally = -1 - -// leasePeriodPointer returns nil for LeaseBreakNaturally; otherwise a pointer to the period. -func leasePeriodPointer(period int32) (p *int32) { - if period != LeaseBreakNaturally { - p = &period - } - return -} - -// StartCopyFromURL copies the data at the source URL to a blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
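// A hedged sketch of taking a lease and tolerating a holder that is already
// present, using the ServiceCode constants defined earlier in this package.
// The name acquireOrSkip is illustrative; 15 is the minimum lease duration in seconds.
func acquireOrSkip(ctx context.Context, b azblob.BlobURL, proposedID string) error {
	_, err := b.AcquireLease(ctx, proposedID, 15, azblob.ModifiedAccessConditions{})
	if stgErr, ok := err.(azblob.StorageError); ok &&
		stgErr.ServiceCode() == azblob.ServiceCodeLeaseAlreadyPresent {
		return nil // another client holds the lease; treat as non-fatal here
	}
	// To break a lease with service semantics instead, pass LeaseBreakNaturally (-1):
	//   b.BreakLease(ctx, azblob.LeaseBreakNaturally, azblob.ModifiedAccessConditions{})
	return err
}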
-func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) { - srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() - dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() - dstLeaseID := dstac.LeaseAccessConditions.pointers() - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata, - tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince, - srcIfMatchETag, srcIfNoneMatchETag, - nil, // source ifTags - dstIfModifiedSince, dstIfUnmodifiedSince, - dstIfMatchETag, dstIfNoneMatchETag, - nil, // Blob ifTags - dstLeaseID, - nil, - blobTagsString, // Blob tags - nil, - // immutability policy - nil, BlobImmutabilityPolicyModeNone, nil, - ) -} - -// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. -func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) { - return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil) -} - -// SetImmutabilityPolicy sets a temporary immutability policy with an expiration date. The expiration date must be in the future. -// While the immutability policy is active, the blob can be read but not modified or deleted. -// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview) -// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-immutability-policy (REST API reference) -// A container with object-level immutability enabled is required. -func (b BlobURL) SetImmutabilityPolicy(ctx context.Context, expiry time.Time, mode BlobImmutabilityPolicyModeType, ifUnmodifiedSince *time.Time) (*BlobSetImmutabilityPolicyResponse, error) { - return b.blobClient.SetImmutabilityPolicy(ctx, nil, nil, ifUnmodifiedSince, &expiry, mode) -} - -// DeleteImmutabilityPolicy deletes a temporary immutability policy with an expiration date. -// While the immutability policy is active, the blob can be read but not modified or deleted. -// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview) -// and https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob-immutability-policy (REST API reference) -// A container with object-level immutability enabled is required. -func (b BlobURL) DeleteImmutabilityPolicy(ctx context.Context) (*BlobDeleteImmutabilityPolicyResponse, error) { - return b.blobClient.DeleteImmutabilityPolicy(ctx, nil, nil) -} - -// SetLegalHold enables a temporary immutability policy that can be applied for general data protection purposes. -// It stores the current blob version in a WORM (Write-Once Read-Many) state. While in effect, the blob can be read but not modified or deleted. 
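// StartCopyFromURL is asynchronous: the response carries an initial copy
// status, and completion is observed by polling GetProperties. A hedged sketch
// (assumes imports "context", "net/url", "time"; the name copyAndWait and the
// 2-second polling interval are illustrative):
func copyAndWait(ctx context.Context, dst azblob.BlobURL, src url.URL) error {
	resp, err := dst.StartCopyFromURL(ctx, src, azblob.Metadata{},
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{},
		azblob.DefaultAccessTier, nil)
	if err != nil {
		return err
	}
	for status := resp.CopyStatus(); status == azblob.CopyStatusPending; {
		time.Sleep(2 * time.Second)
		props, err := dst.GetProperties(ctx, azblob.BlobAccessConditions{},
			azblob.ClientProvidedKeyOptions{})
		if err != nil {
			return err
		}
		status = props.CopyStatus()
	}
	return nil
}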
-// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-legal-hold-overview (Feature overview) -// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-legal-hold (REST API reference) -// A container with object-level immutability enabled is required. -func (b BlobURL) SetLegalHold(ctx context.Context, legalHold bool) (*BlobSetLegalHoldResponse, error) { - return b.blobClient.SetLegalHold(ctx, legalHold, nil, nil) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go deleted file mode 100644 index ae0079e8e5d..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go +++ /dev/null @@ -1,182 +0,0 @@ -package azblob - -import ( - "context" - "io" - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -const ( - // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. - BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB - - // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. - BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB - - // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. - BlockBlobMaxBlocks = 50000 -) - -// BlockBlobURL defines a set of operations applicable to block blobs. -type BlockBlobURL struct { - BlobURL - bbClient blockBlobClient -} - -// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline. -func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL { - blobClient := newBlobClient(url, p) - bbClient := newBlockBlobClient(url, p) - return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient} -} - -// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline. -func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL { - return NewBlockBlobURL(bb.blobClient.URL(), p) -} - -// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL { - p := NewBlobURLParts(bb.URL()) - p.Snapshot = snapshot - return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) -} - -// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL { - p := NewBlobURLParts(bb.URL()) - p.VersionID = versionId - return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) -} - -func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { - return bb.blobClient.GetAccountInfo(ctx) -} - -// Upload creates a new block blob or overwrites an existing block blob. -// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not -// supported with Upload; the content of the existing blob is overwritten with the new content. To -// perform a partial update of a block blob, use StageBlock and CommitBlockList. -// This method panics if the stream is not at position 0. -// Note that the http client closes the body stream after the request is sent to the service. 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. -func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobUploadResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - count, err := validateSeekableStreamAt0AndGetCount(body) - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() - if err != nil { - return nil, err - } - return bb.bbClient.Upload(ctx, body, count, nil, nil, - &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, - &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil, - blobTagsString, // Blob tags - // immutability policy - immutabilityExpiry, immutabilityMode, legalHold, - ) -} - -// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// Note that the http client closes the body stream after the request is sent to the service. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. -func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) { - count, err := validateSeekableStreamAt0AndGetCount(body) - if err != nil { - return nil, err - } - return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(), - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - nil) -} - -// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. -// If count is CountToEnd (0), then data is read from specified offset to the end. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. -func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobStageBlockFromURLResponse, error) { - sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() - return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization)) -} - -// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. 
-// In order to be written as part of a blob, a block must have been successfully written -// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob -// by uploading only those blocks that have changed, then committing the new and existing -// blocks together. Any blocks not specified in the block list are permanently deleted. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. -func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() - return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil, - &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil, - metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - tier, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil, - blobTagsString, // Blob tags - // immutability policy - immutabilityExpiry, immutabilityMode, legalHold, - ) -} - -// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. -func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) { - return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), - nil, // Blob ifTags - nil) -} - -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
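// A hedged sketch of the stage-then-commit flow the comments above describe:
// each chunk is staged under a Base64 block ID, then the full ID list is
// committed. Assumes imports "bytes", "context", "encoding/base64", "fmt";
// the name uploadInBlocks and the ID scheme are illustrative.
func uploadInBlocks(ctx context.Context, bb azblob.BlockBlobURL, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// Block IDs must be Base64-encoded and the same length for every block.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%08d", i)))
		if _, err := bb.StageBlock(ctx, id, bytes.NewReader(chunk),
			azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	_, err := bb.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
		azblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil,
		azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{})
	return err
}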
-func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, immutability ImmutabilityPolicyOptions, sourceAuthorization TokenCredential) (*BlobCopyFromURLResponse, error) { - srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() - dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() - dstLeaseID := dstac.LeaseAccessConditions.pointers() - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() - return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier, - srcIfModifiedSince, srcIfUnmodifiedSince, - srcIfMatchETag, srcIfNoneMatchETag, - dstIfModifiedSince, dstIfUnmodifiedSince, - dstIfMatchETag, dstIfNoneMatchETag, - nil, // Blob ifTags - dstLeaseID, nil, srcContentMD5, - blobTagsString, // Blob tags - // immutability policy - immutabilityExpiry, immutabilityMode, legalHold, tokenCredentialPointers(sourceAuthorization)) -} - -// PutBlobFromURL synchronously creates a new Block Blob with data from the source URL up to a max length of 256MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url. -func (bb BlockBlobURL) PutBlobFromURL(ctx context.Context, h BlobHTTPHeaders, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, dstContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobPutBlobFromURLResponse, error) { - - srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() - dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() - dstLeaseID := dstac.LeaseAccessConditions.pointers() - blobTagsString := SerializeBlobTagsHeader(blobTagsMap) - - return bb.bbClient.PutBlobFromURL(ctx, 0, source.String(), nil, nil, - &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, dstContentMD5, &h.CacheControl, - metadata, dstLeaseID, &h.ContentDisposition, cpk.EncryptionKey, cpk.EncryptionKeySha256, - cpk.EncryptionAlgorithm, cpk.EncryptionScope, tier, dstIfModifiedSince, dstIfUnmodifiedSince, - dstIfMatchETag, dstIfNoneMatchETag, nil, srcIfModifiedSince, srcIfUnmodifiedSince, - srcIfMatchETag, srcIfNoneMatchETag, nil, nil, srcContentMD5, blobTagsString, nil, tokenCredentialPointers(sourceAuthorization)) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go deleted file mode 100644 index 8fd78619755..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go +++ /dev/null @@ -1,319 +0,0 @@ -package azblob - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/url" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs. -type ContainerURL struct { - client containerClient -} - -// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline. 
-func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL { - client := newContainerClient(url, p) - return ContainerURL{client: client} -} - -// URL returns the URL endpoint used by the ContainerURL object. -func (c ContainerURL) URL() url.URL { - return c.client.URL() -} - -// String returns the URL as a string. -func (c ContainerURL) String() string { - u := c.URL() - return u.String() -} - -func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { - return c.client.GetAccountInfo(ctx) -} - -// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline. -func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { - return NewContainerURL(c.URL(), p) -} - -// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of -// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. -// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's -// NewBlobURL method. -func (c ContainerURL) NewBlobURL(blobName string) BlobURL { - blobURL := appendToURLPath(c.URL(), blobName) - return NewBlobURL(blobURL, c.client.Pipeline()) -} - -// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of -// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. -// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's -// NewAppendBlobURL method. -func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { - blobURL := appendToURLPath(c.URL(), blobName) - return NewAppendBlobURL(blobURL, c.client.Pipeline()) -} - -// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of -// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL. -// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's -// NewBlockBlobURL method. -func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { - blobURL := appendToURLPath(c.URL(), blobName) - return NewBlockBlobURL(blobURL, c.client.Pipeline()) -} - -// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of -// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. -// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's -// NewPageBlobURL method. -func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { - blobURL := appendToURLPath(c.URL(), blobName) - return NewPageBlobURL(blobURL, c.client.Pipeline()) -} - -// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. 
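// A hedged sketch of how these URL types compose, from the account endpoint
// down to a single blob. Assumes imports "fmt", "net/url", and this azblob
// package; the name blockBlobFor and the endpoint format are illustrative.
func blockBlobFor(account, key, container, blob string) (azblob.BlockBlobURL, error) {
	cred, err := azblob.NewSharedKeyCredential(account, key)
	if err != nil {
		return azblob.BlockBlobURL{}, err
	}
	p := azblob.NewPipeline(cred, azblob.PipelineOptions{})
	u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", account, container))
	if err != nil {
		return azblob.BlockBlobURL{}, err
	}
	return azblob.NewContainerURL(*u, p).NewBlockBlobURL(blob), nil
}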
-func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { - return c.client.Create(ctx, nil, metadata, publicAccessType, nil, - nil, nil, // container encryption - ) -} - -// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. -func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) { - if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { - return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") - } - - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() - return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, nil) -} - -// GetProperties returns the container's properties. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. -func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) { - // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetMetadata method at all, simplifying the API. - return c.client.GetProperties(ctx, nil, ac.pointers(), nil) -} - -// SetMetadata sets the container's metadata. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. -func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) { - if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { - return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service") - } - ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers() - return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil) -} - -// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. -func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) { - return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil) -} - -// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. -// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. -type AccessPolicyPermission struct { - Read, Add, Create, Write, Delete, List bool -} - -// String produces the access policy permission string for an Azure Storage container. -// Call this method to set AccessPolicy's Permission field. 
-func (p AccessPolicyPermission) String() string { - var b bytes.Buffer - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Create { - b.WriteRune('c') - } - if p.Write { - b.WriteRune('w') - } - if p.Delete { - b.WriteRune('d') - } - if p.List { - b.WriteRune('l') - } - return b.String() -} - -// Parse initializes the AccessPolicyPermission's fields from a string. -func (p *AccessPolicyPermission) Parse(s string) error { - *p = AccessPolicyPermission{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'l': - p.List = true - default: - return fmt.Errorf("invalid permission: '%v'", r) - } - } - return nil -} - -// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. -func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier, - ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) { - if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { - return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") - } - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() - return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(), - accessType, ifModifiedSince, ifUnmodifiedSince, nil) -} - -// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() - return c.client.AcquireLease(ctx, nil, &duration, &proposedID, - ifModifiedSince, ifUnmodifiedSince, nil) -} - -// RenewLease renews the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() - return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) -} - -// ReleaseLease releases the container's previously-acquired lease. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() - return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) -} - -// BreakLease breaks the container's previously-acquired lease (if it exists). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. 
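// A hedged round-trip sketch for AccessPolicyPermission: String emits the
// runes in the fixed order r, a, c, w, d, l, and Parse reverses it. The name
// permissionRoundTrip is illustrative.
func permissionRoundTrip() error {
	perm := azblob.AccessPolicyPermission{Read: true, List: true}
	s := perm.String() // "rl"
	var parsed azblob.AccessPolicyPermission
	if err := parsed.Parse(s); err != nil {
		return err
	}
	_ = parsed // parsed now equals perm
	return nil
}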
-func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() - return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil) -} - -// ChangeLease changes the container's lease ID. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. -func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) { - ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() - return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil) -} - -// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the -// previously-returned Marker) to get the next segment. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) { - prefix, include, maxResults := o.pointers() - return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil) -} - -// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. -// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the -// previously-returned Marker) to get the next segment. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. -func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) { - if o.Details.Snapshots { - return nil, errors.New("snapshots are not supported in this listing operation") - } - prefix, include, maxResults := o.pointers() - return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil) -} - -// ListBlobsSegmentOptions defines options available when calling ListBlobs. -type ListBlobsSegmentOptions struct { - Details BlobListingDetails // No IncludeType header is produced if "" - Prefix string // No Prefix header is produced if "" - - // MaxResults sets the maximum desired results you want the service to return. Note, the - // service may return fewer results than requested. - // MaxResults=0 means no 'MaxResults' header specified. - MaxResults int32 -} - -func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) { - if o.Prefix != "" { - prefix = &o.Prefix - } - include = o.Details.slice() - if o.MaxResults != 0 { - maxResults = &o.MaxResults - } - return -} - -// BlobListingDetails indicates what additional information the service should return with each blob. -type BlobListingDetails struct { - Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, Permissions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool -} - -// slice produces the Include query parameter's value. 
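// A hedged sketch of the marker loop the listing comments above describe: keep
// calling ListBlobsFlatSegment with the previously returned marker until
// NotDone reports false. Assumes import "context"; listAllBlobNames is illustrative.
func listAllBlobNames(ctx context.Context, c azblob.ContainerURL) ([]string, error) {
	var names []string
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := c.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
		if err != nil {
			return nil, err
		}
		marker = resp.NextMarker
		for _, item := range resp.Segment.BlobItems {
			names = append(names, item.Name)
		}
	}
	return names, nil
}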
-func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType { - items := []ListBlobsIncludeItemType{} - // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! - if d.Copy { - items = append(items, ListBlobsIncludeItemCopy) - } - if d.Deleted { - items = append(items, ListBlobsIncludeItemDeleted) - } - if d.DeletedWithVersions { - items = append(items, ListBlobsIncludeItemDeletedwithversions) - } - if d.ImmutabilityPolicy { - items = append(items, ListBlobsIncludeItemImmutabilitypolicy) - } - if d.LegalHold { - items = append(items, ListBlobsIncludeItemLegalhold) - } - if d.Metadata { - items = append(items, ListBlobsIncludeItemMetadata) - } - if d.Permissions { - items = append(items, ListBlobsIncludeItemPermissions) - } - if d.Snapshots { - items = append(items, ListBlobsIncludeItemSnapshots) - } - if d.UncommittedBlobs { - items = append(items, ListBlobsIncludeItemUncommittedblobs) - } - if d.Tags { - items = append(items, ListBlobsIncludeItemTags) - } - if d.Versions { - items = append(items, ListBlobsIncludeItemVersions) - } - return items -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go deleted file mode 100644 index dc57765b416..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go +++ /dev/null @@ -1,276 +0,0 @@ -package azblob - -import ( - "context" - "fmt" - "io" - "net/url" - "strconv" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -const ( - // PageBlobPageBytes indicates the number of bytes in a page (512). - PageBlobPageBytes = 512 - - // PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage. - PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB -) - -// PageBlobURL defines a set of operations applicable to page blobs. -type PageBlobURL struct { - BlobURL - pbClient pageBlobClient -} - -// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline. -func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL { - blobClient := newBlobClient(url, p) - pbClient := newPageBlobClient(url, p) - return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient} -} - -// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline. -func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL { - return NewPageBlobURL(pb.blobClient.URL(), p) -} - -// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL { - p := NewBlobURLParts(pb.URL()) - p.Snapshot = snapshot - return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) -} - -// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id. -// Pass "" to remove the snapshot returning a URL to the base blob. -func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL { - p := NewBlobURLParts(pb.URL()) - p.VersionID = versionId - return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) -} - -func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { - return pb.blobClient.GetAccountInfo(ctx) -} - -// Create creates a page blob of the specified length. 
Call PutPage to upload data to a page blob.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*PageBlobCreateResponse, error) {
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
- immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
- return pb.pbClient.Create(ctx, 0, size, nil, tier,
- &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
- metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
- cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
- cpk.EncryptionScope, // CPK-N
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
- nil, // Blob tags
- &sequenceNumber, nil,
- blobTagsString, // Blob tags
- // immutability policy
- immutabilityExpiry, immutabilityMode, legalHold,
- )
-}
-
-// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
-// This method returns an error if the stream is not at position 0.
-// Note that the http client closes the body stream after the request is sent to the service.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) {
- count, err := validateSeekableStreamAt0AndGetCount(body)
- if err != nil {
- return nil, err
- }
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
- ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
- return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
- PageRange{Start: offset, End: offset + count - 1}.pointers(),
- ac.LeaseAccessConditions.pointers(),
- cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
- cpk.EncryptionScope, // CPK-N
- ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
- ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
- nil, // Blob ifTags
- nil)
-}
-
-// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
-// The sourceOffset specifies the start offset of source data to copy from.
-// The destOffset specifies the start offset in the page blob at which the data will be written.
-// The count must be a multiple of 512 bytes.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
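// A minimal sketch of a page-aligned upload, assuming a PageBlobURL named
// pbURL (hypothetical) and the bytes/context/fmt imports. Offsets and lengths
// passed to UploadPages must be multiples of PageBlobPageBytes (512); this
// sketch rejects misaligned offsets and zero-pads the payload up to a page
// boundary before uploading.
func uploadAligned(ctx context.Context, pbURL PageBlobURL, offset int64, data []byte) error {
	if offset%PageBlobPageBytes != 0 {
		return fmt.Errorf("offset %d is not 512-byte aligned", offset)
	}
	if rem := len(data) % PageBlobPageBytes; rem != 0 {
		data = append(data, make([]byte, PageBlobPageBytes-rem)...) // zero-pad to a page boundary
	}
	_, err := pbURL.UploadPages(ctx, offset, bytes.NewReader(data), PageBlobAccessConditions{}, nil, ClientProvidedKeyOptions{})
	return err
}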
-func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*PageBlobUploadPagesFromURLResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() - sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() - ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers() - return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0, - *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil, - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V - cpk.EncryptionScope, // CPK-N - destinationAccessConditions.LeaseAccessConditions.pointers(), - ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization)) -} - -// ClearPages frees the specified pages from the page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. -func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() - return pb.pbClient.ClearPages(ctx, 0, nil, - PageRange{Start: offset, End: offset + count - 1}.pointers(), - ac.LeaseAccessConditions.pointers(), - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, - ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) -} - -// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return pb.pbClient.GetPageRanges(ctx, nil, nil, - httpRange{offset: offset, count: count}.pointers(), - ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. 
-func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - - return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot, - prevSnapshotURL, // Get managed disk diff - httpRange{offset: offset, count: count}.pointers(), - ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. -func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, - nil, // Get managed disk diff - httpRange{offset: offset, count: count}.pointers(), - ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil, // Blob ifTags - nil) -} - -// Resize resizes the page blob to the specified size (which must be a multiple of 512). -// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(), - cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK - cpk.EncryptionScope, // CPK-N - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) -} - -// UpdateSequenceNumber sets the page blob's sequence number. -func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, - ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) { - sn := &sequenceNumber - if action == SequenceNumberActionIncrement { - sn = nil - } - ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() - return pb.pbClient.UpdateSequenceNumber(ctx, action, nil, - ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, - nil, sn, nil) -} - -// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. -// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. -// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and -// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. 
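// A minimal sketch of the three sequence-number actions, assuming a
// PageBlobURL named pbURL (hypothetical); the action constant names are
// assumed from this package's generated enums. For the increment action the
// value argument is ignored, which is why UpdateSequenceNumber above nils out
// sn in that case.
func exampleSequenceNumberActions(ctx context.Context, pbURL PageBlobURL) error {
	if _, err := pbURL.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate, 7, BlobAccessConditions{}); err != nil { // set to exactly 7
		return err
	}
	if _, err := pbURL.UpdateSequenceNumber(ctx, SequenceNumberActionMax, 10, BlobAccessConditions{}); err != nil { // max(current, 10)
		return err
	}
	_, err := pbURL.UpdateSequenceNumber(ctx, SequenceNumberActionIncrement, 0, BlobAccessConditions{}) // current + 1; value ignored
	return err
}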
-func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) { - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - qp := source.Query() - qp.Set("snapshot", snapshot) - source.RawQuery = qp.Encode() - return pb.pbClient.CopyIncremental(ctx, source.String(), nil, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) -} - -func (pr PageRange) pointers() *string { - endOffset := strconv.FormatInt(int64(pr.End), 10) - asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset) - return &asString -} - -type PageBlobAccessConditions struct { - ModifiedAccessConditions - LeaseAccessConditions - SequenceNumberAccessConditions -} - -// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set. -type SequenceNumberAccessConditions struct { - // IfSequenceNumberLessThan ensures that the page blob operation succeeds - // only if the blob's sequence number is less than a value. - // IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified. - // IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value - // IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0 - IfSequenceNumberLessThan int64 - - // IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds - // only if the blob's sequence number is less than or equal to a value. - // IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified. - // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value - // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0 - IfSequenceNumberLessThanOrEqual int64 - - // IfSequenceNumberEqual ensures that the page blob operation succeeds - // only if the blob's sequence number is equal to a value. - // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified. - // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value - // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0 - IfSequenceNumberEqual int64 -} - -// pointers is for internal infrastructure. It returns the fields as pointers. 
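// A minimal sketch of the sentinel convention documented above (the values
// here are illustrative): 0 omits the condition header entirely, -1 sends an
// explicit header value of 0, and any positive value is sent as-is.
func exampleSequenceNumberSentinels() {
	ac := SequenceNumberAccessConditions{
		IfSequenceNumberLessThan:        -1, // header sent with value 0
		IfSequenceNumberLessThanOrEqual: 0,  // header omitted entirely
		IfSequenceNumberEqual:           5,  // header sent with value 5
	}
	snltoe, snlt, sne := ac.pointers()
	fmt.Println(snltoe == nil, *snlt, *sne) // prints: true 0 5
}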
-func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { - var zero int64 // Defaults to 0 - switch ac.IfSequenceNumberLessThan { - case -1: - snlt = &zero - case 0: - snlt = nil - default: - snlt = &ac.IfSequenceNumberLessThan - } - - switch ac.IfSequenceNumberLessThanOrEqual { - case -1: - snltoe = &zero - case 0: - snltoe = nil - default: - snltoe = &ac.IfSequenceNumberLessThanOrEqual - } - switch ac.IfSequenceNumberEqual { - case -1: - sne = &zero - case 0: - sne = nil - default: - sne = &ac.IfSequenceNumberEqual - } - return -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go deleted file mode 100644 index ce3ac97dc70..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go +++ /dev/null @@ -1,177 +0,0 @@ -package azblob - -import ( - "context" - "net/url" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -const ( - // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. - ContainerNameRoot = "$root" - - // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. - ContainerNameLogs = "$logs" -) - -// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers. -type ServiceURL struct { - client serviceClient -} - -// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. -func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { - client := newServiceClient(primaryURL, p) - return ServiceURL{client: client} -} - -//GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. -//OAuth is required for this call, as well as any role that can delegate access to the storage account. -func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) { - sc := newServiceClient(s.client.url, s.client.p) - udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID) - if err != nil { - return UserDelegationCredential{}, err - } - return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil -} - -//TODO this was supposed to be generated -//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion -func NewKeyInfo(Start, Expiry time.Time) KeyInfo { - return KeyInfo{ - Start: Start.UTC().Format(SASTimeFormat), - Expiry: Expiry.UTC().Format(SASTimeFormat), - } -} - -func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { - return s.client.GetAccountInfo(ctx) -} - -// URL returns the URL endpoint used by the ServiceURL object. -func (s ServiceURL) URL() url.URL { - return s.client.URL() -} - -// String returns the URL as a string. -func (s ServiceURL) String() string { - u := s.URL() - return u.String() -} - -// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. -func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { - return NewServiceURL(s.URL(), p) -} - -// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of -// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL. 
-// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
-// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
-// NewContainerURL method.
-func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
- containerURL := appendToURLPath(s.URL(), containerName)
- return NewContainerURL(containerURL, s.client.Pipeline())
-}
-
-// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
-func appendToURLPath(u url.URL, name string) url.URL {
- // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
- // When you call url.Parse() this is what you'll get:
- // Scheme: "https"
- // Opaque: ""
- // User: nil
- // Host: "ms.com"
- // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
- // RawPath: ""
- // ForceQuery: false
- // RawQuery: "k1=v1&k2=v2"
- // Fragment: "f"
- if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
- u.Path += "/" // Append "/" to end before appending name
- }
- u.Path += name
- return u
-}
-
-// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
-// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
-// After getting a segment, process it, and then call ListContainersSegment again (passing the
-// previously-returned Marker) to get the next segment. For more information, see
-// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
-func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
- prefix, include, maxResults := o.pointers()
- return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
-}
-
-// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
-type ListContainersSegmentOptions struct {
- Detail ListContainersDetail // No IncludeType header is produced if ""
- Prefix string // No Prefix header is produced if ""
- MaxResults int32 // 0 means unspecified
- // TODO: update swagger to generate this type?
-}
-
-func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) {
- if o.Prefix != "" {
- prefix = &o.Prefix
- }
- if o.MaxResults != 0 {
- maxResults = &o.MaxResults
- }
- details := o.Detail.string()
- if len(details) > 0 {
- include = []ListContainersIncludeType{ListContainersIncludeType(details)}
- }
- return
-}
-
-// ListContainersDetail indicates what additional information the service should return with each container.
-type ListContainersDetail struct {
- // Tells the service whether to return metadata for each container.
- Metadata bool
-
- // Show containers that have been deleted when the soft-delete feature is enabled.
- // Deleted bool
-}
-
-// string produces the Include query parameter's value.
-func (d *ListContainersDetail) string() string {
- items := make([]string, 0, 2)
- // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
- if d.Metadata { - items = append(items, string(ListContainersIncludeMetadata)) - } - // if d.Deleted { - // items = append(items, string(ListContainersIncludeDeleted)) - // } - if len(items) > 0 { - return strings.Join(items, ",") - } - return string(ListContainersIncludeNone) -} - -func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) { - return bsu.client.GetProperties(ctx, nil, nil) -} - -func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) { - return bsu.client.SetProperties(ctx, properties, nil, nil) -} - -func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { - return bsu.client.GetStatistics(ctx, nil, nil) -} - -// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression. -// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. -// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags -// eg. "dog='germanshepherd' and penguin='emperorpenguin'" -// To specify a container, eg. "@container=’containerName’ and Name = ‘C’" -func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) { - return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go deleted file mode 100644 index 9fcbbc4092c..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go +++ /dev/null @@ -1,38 +0,0 @@ -package azblob - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" -) - -// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it -func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential { - return UserDelegationCredential{ - accountName: accountName, - accountKey: key, - } -} - -type UserDelegationCredential struct { - accountName string - accountKey UserDelegationKey -} - -// AccountName returns the Storage account's name -func (f UserDelegationCredential) AccountName() string { - return f.accountName -} - -// ComputeHMAC -func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) { - bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value) - h := hmac.New(sha256.New, bytes) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -// Private method to return important parameters for NewSASQueryParameters -func (f UserDelegationCredential) getUDKParams() *UserDelegationKey { - return &f.accountKey -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go deleted file mode 100644 index d89ccb09c14..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package azblob - -const serviceLibVersion = "0.15" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go deleted file mode 100644 index 
a81987d54a3..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go +++ /dev/null @@ -1,55 +0,0 @@ -package azblob - -import ( - "context" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// Credential represent any credential type; it is used to create a credential policy Factory. -type Credential interface { - pipeline.Factory - credentialMarker() -} - -type credentialFunc pipeline.FactoryFunc - -func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return f(next, po) -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (credentialFunc) credentialMarker() {} - -////////////////////////////// - -// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource -// or for use with Shared Access Signatures (SAS). -func NewAnonymousCredential() Credential { - return anonymousCredentialFactory -} - -var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton - -// anonymousCredentialPolicyFactory is the credential's policy factory. -type anonymousCredentialPolicyFactory struct { -} - -// New creates a credential policy object. -func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return &anonymousCredentialPolicy{next: next} -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (*anonymousCredentialPolicyFactory) credentialMarker() {} - -// anonymousCredentialPolicy is the credential's policy object. -type anonymousCredentialPolicy struct { - next pipeline.Policy -} - -// Do implements the credential's policy interface. -func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - // For anonymous credentials, this is effectively a no-op - return p.next.Do(ctx, request) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go deleted file mode 100644 index cc59cbbed5d..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go +++ /dev/null @@ -1,205 +0,0 @@ -package azblob - -import ( - "bytes" - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "errors" - "net/http" - "net/url" - "sort" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - bytes, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return &SharedKeyCredential{}, err - } - return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil -} - -// SharedKeyCredential contains an account's name and its primary or secondary key. -// It is immutable making it shareable and goroutine-safe. -type SharedKeyCredential struct { - // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only - accountName string - accountKey []byte -} - -// AccountName returns the Storage account's name. 
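// A minimal sketch of building a shared-key pipeline, assuming accountName and
// accountKey come from configuration (hypothetical values). The key must be
// the base64-encoded account key exactly as the portal presents it;
// NewSharedKeyCredential rejects anything that fails base64 decoding.
func newPipelineFromKey(accountName, accountKey string) (pipeline.Pipeline, error) {
	cred, err := NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return nil, err
	}
	// NewPipeline (defined in zc_pipeline.go) places the credential policy
	// close to the wire so it signs the final form of each request.
	return NewPipeline(cred, PipelineOptions{}), nil
}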
-func (f SharedKeyCredential) AccountName() string { - return f.accountName -} - -func (f SharedKeyCredential) getAccountKey() []byte { - return f.accountKey -} - -// noop function to satisfy StorageAccountCredential interface -func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { - return nil -} - -// New creates a credential policy object. -func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - // Add a x-ms-date header if it doesn't already exist - if d := request.Header.Get(headerXmsDate); d == "" { - request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} - } - stringToSign, err := f.buildStringToSign(request) - if err != nil { - return nil, err - } - signature := f.ComputeHMACSHA256(stringToSign) - authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") - request.Header[headerAuthorization] = []string{authHeader} - - response, err := next.Do(ctx, request) - if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { - // Service failed to authenticate request, log it - po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") - } - return response, err - }) -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (*SharedKeyCredential) credentialMarker() {} - -// Constants ensuring that header names are correctly spelled and consistently cased. -const ( - headerAuthorization = "Authorization" - headerCacheControl = "Cache-Control" - headerContentEncoding = "Content-Encoding" - headerContentDisposition = "Content-Disposition" - headerContentLanguage = "Content-Language" - headerContentLength = "Content-Length" - headerContentMD5 = "Content-MD5" - headerContentType = "Content-Type" - headerDate = "Date" - headerIfMatch = "If-Match" - headerIfModifiedSince = "If-Modified-Since" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerUserAgent = "User-Agent" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" -) - -// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
-func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { - h := hmac.New(sha256.New, f.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - headers := request.Header - contentLength := headers.Get(headerContentLength) - if contentLength == "0" { - contentLength = "" - } - - canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) - if err != nil { - return "", err - } - - stringToSign := strings.Join([]string{ - request.Method, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), - contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - "", // Empty date because x-ms-date is expected (as per web page above) - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - return stringToSign, nil -} - -func buildCanonicalizedHeader(headers http.Header) string { - cm := map[string][]string{} - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v // NOTE: the value must not have any whitespace around it. - } - } - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - sort.Strings(keys) - ch := bytes.NewBufferString("") - for i, key := range keys { - if i > 0 { - ch.WriteRune('\n') - } - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(strings.Join(cm[key], ",")) - } - return ch.String() -} - -func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - cr := bytes.NewBufferString("/") - cr.WriteString(f.accountName) - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
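// A minimal sketch of the signing primitive mirrored from ComputeHMACSHA256
// above, assuming keyBase64 holds a base64-encoded account key (hypothetical)
// and the crypto/hmac, crypto/sha256, and encoding/base64 imports: HMAC-SHA256
// over the UTF-8 string-to-sign, with the digest re-encoded as base64 for the
// Authorization header.
func signStringToSign(keyBase64, stringToSign string) (string, error) {
	key, err := base64.StdEncoding.DecodeString(keyBase64)
	if err != nil {
		return "", err
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(stringToSign)) // hash.Hash.Write never returns an error
	return base64.StdEncoding.EncodeToString(mac.Sum(nil)), nil
}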
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) - } else { - // a slash is required to indicate the root path - cr.WriteString("/") - } - - // params is a map[string][]string; param name is key; params values is []string - params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values - if err != nil { - return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") - } - - if len(params) > 0 { // There is at least 1 query parameter - paramNames := []string{} // We use this to sort the parameter key names - for paramName := range params { - paramNames = append(paramNames, paramName) // paramNames must be lowercase - } - sort.Strings(paramNames) - - for _, paramName := range paramNames { - paramValues := params[paramName] - sort.Strings(paramValues) - - // Join the sorted key values separated by ',' - // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) - } - } - return cr.String(), nil -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go deleted file mode 100644 index 19d8ea41881..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go +++ /dev/null @@ -1,146 +0,0 @@ -package azblob - -import ( - "context" - "errors" - "sync/atomic" - - "runtime" - "sync" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// TokenRefresher represents a callback method that you write; this method is called periodically -// so you can refresh the token credential's value. -type TokenRefresher func(credential TokenCredential) time.Duration - -// TokenCredential represents a token credential (which is also a pipeline.Factory). -type TokenCredential interface { - Credential - Token() string - SetToken(newToken string) -} - -func tokenCredentialPointers(credential TokenCredential) *string { - if credential == nil { - return nil - } - - out := "Bearer " + credential.Token() - return &out -} - -// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage -// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for -// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the -// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration -// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again. -// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your -// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a -// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline. 
-func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
- tc := &tokenCredential{}
- tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
- if tokenRefresher == nil {
- return tc // If no callback specified, return the simple tokenCredential
- }
-
- tcwr := &tokenCredentialWithRefresh{token: tc}
- tcwr.token.startRefresh(tokenRefresher)
- runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
- deadTC.token.stopRefresh()
- deadTC.token = nil // Sanity (not really required)
- })
- return tcwr
-}
-
-// tokenCredentialWithRefresh is a wrapper over a token credential.
-// When this wrapper object gets GC'd, it stops the tokenCredential's timer
-// which allows the tokenCredential object to also be GC'd.
-type tokenCredentialWithRefresh struct {
- token *tokenCredential
-}
-
-// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
-func (*tokenCredentialWithRefresh) credentialMarker() {}
-
-// Token returns the current token value
-func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
-
-// SetToken changes the current token value
-func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
-
-// New satisfies pipeline.Factory's New method creating a pipeline policy object.
-func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
- return f.token.New(next, po)
-}
-
-// /////////////////////////////////////////////////////////////////////////////
-
-// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
-type tokenCredential struct {
- token atomic.Value
-
- // The members below are only used if the user specified a tokenRefresher callback function.
- timer *time.Timer
- tokenRefresher TokenRefresher
- lock sync.Mutex
- stopped bool
-}
-
-// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
-func (*tokenCredential) credentialMarker() {}
-
-// Token returns the current token value
-func (f *tokenCredential) Token() string { return f.token.Load().(string) }
-
-// SetToken changes the current token value
-func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
-
-// startRefresh calls refresh which immediately calls tokenRefresher
-// and then starts a timer to call tokenRefresher in the future.
-func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
- f.tokenRefresher = tokenRefresher
- f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
- f.refresh()
-}
-
-// refresh calls the user's tokenRefresher so they can refresh the token (by
-// calling SetToken) and then starts another timer (based on the returned duration)
-// in order to refresh the token again in the future.
-func (f *tokenCredential) refresh() {
- d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
- if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
- f.lock.Lock()
- if !f.stopped {
- f.timer = time.AfterFunc(d, f.refresh)
- }
- f.lock.Unlock()
- }
-}
-
-// stopRefresh stops any pending timer and sets stopped field to true to prevent
-// any new timer from starting.
-// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
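// A minimal sketch of a refresher callback for NewTokenCredential, assuming a
// hypothetical fetchToken helper that returns a fresh OAuth token and its
// remaining lifetime. Returning a positive duration schedules the next
// refresh; returning 0 stops refreshing for good.
func newRefreshingCredential() TokenCredential {
	return NewTokenCredential("", func(credential TokenCredential) time.Duration {
		token, expiresIn, err := fetchToken() // hypothetical helper, not part of this package
		if err != nil {
			return 0 // give up; callers should cancel contexts used with this credential
		}
		credential.SetToken(token)
		return expiresIn - 2*time.Minute // refresh a little before the token expires
	})
}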
-func (f *tokenCredential) stopRefresh() {
- f.lock.Lock()
- f.stopped = true
- if f.timer != nil {
- f.timer.Stop()
- }
- f.lock.Unlock()
-}
-
-// New satisfies pipeline.Factory's New method creating a pipeline policy object.
-func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
- return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
- if request.URL.Scheme != "https" {
- // HTTPS must be used, otherwise the tokens are at the risk of being exposed
- return nil, errors.New("token credentials require a URL using the https protocol scheme")
- }
- request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
- return next.Do(ctx, request)
- })
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
deleted file mode 100644
index ba99255c140..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package azblob
-
-import (
- "github.com/Azure/azure-pipeline-go/pipeline"
-)
-
-// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
-type PipelineOptions struct {
- // Log configures the pipeline's logging infrastructure indicating what information is logged and where.
- Log pipeline.LogOptions
-
- // Retry configures the built-in retry policy behavior.
- Retry RetryOptions
-
- // RequestLog configures the built-in request logging policy.
- RequestLog RequestLogOptions
-
- // Telemetry configures the built-in telemetry policy behavior.
- Telemetry TelemetryOptions
-
- // HTTPSender configures the sender of HTTP requests
- HTTPSender pipeline.Factory
-}
-
-// NewPipeline creates a Pipeline using the specified credentials and options.
-func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
- // Closest to API goes first; closest to the wire goes last
- f := []pipeline.Factory{
- NewTelemetryPolicyFactory(o.Telemetry),
- NewUniqueRequestIDPolicyFactory(),
- NewRetryPolicyFactory(o.Retry),
- }
-
- if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
- // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
- // NOTE: The credential's policy factory must appear close to the wire so it can sign any
- // changes made by other factories (like UniqueRequestIDPolicyFactory)
- f = append(f, c)
- }
- f = append(f,
- NewRequestLogPolicyFactory(o.RequestLog),
- pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
-
- return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
deleted file mode 100644
index ddc83cc787e..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package azblob
-
-import (
- "bytes"
- "context"
- "fmt"
- "net/http"
- "net/url"
- "runtime"
- "strings"
- "time"
-
- "github.com/Azure/azure-pipeline-go/pipeline"
-)
-
-// RequestLogOptions configures the request logging policy's behavior.
-type RequestLogOptions struct {
- // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
- // duration (-1=no logging; 0=default threshold).
- LogWarningIfTryOverThreshold time.Duration
-
- // SyslogDisabled is a flag to check if logging to Syslog/Windows-Event-Logger is enabled or not
- // We by default print to Syslog/Windows-Event-Logger.
- // If SyslogDisabled is not provided explicitly, the default value will be false.
- SyslogDisabled bool
-}
-
-func (o RequestLogOptions) defaults() RequestLogOptions {
- if o.LogWarningIfTryOverThreshold == 0 {
- // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
- // But this monitors the time to get the HTTP response; NOT the time to download the response body.
- o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
- }
- return o
-}
-
-// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
-func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
- o = o.defaults() // Force defaults to be calculated
- return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
- // These variables are per-policy; shared by multiple calls to Do
- var try int32
- operationStart := time.Now() // If this is the 1st try, record the operation start time
- return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
- try++ // The first try is #1 (not #0)
-
- // Log the outgoing request as informational
- if po.ShouldLog(pipeline.LogInfo) {
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
- pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
- po.Log(pipeline.LogInfo, b.String())
- }
-
- // Set the time for this particular retry operation and then Do the operation.
- tryStart := time.Now()
- response, err = next.Do(ctx, request) // Make the request
- tryEnd := time.Now()
- tryDuration := tryEnd.Sub(tryStart)
- opDuration := tryEnd.Sub(operationStart)
-
- logLevel, forceLog := pipeline.LogInfo, false // Default logging information
-
- // If the response took too long, we'll upgrade to warning.
- if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
- // Log a warning if the try duration exceeded the specified threshold
- logLevel, forceLog = pipeline.LogWarning, !o.SyslogDisabled
- }
-
- var sc int
- if err == nil { // We got a valid response from the service
- sc = response.Response().StatusCode
- } else { // We got an error, so we should inspect if we got a response
- if se, ok := err.(StorageError); ok {
- if r := se.Response(); r != nil {
- sc = r.StatusCode
- }
- }
- }
-
- if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict &&
- sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
- logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those excluded above) or any 5xx
- } else {
- // For other status codes, we leave the level as is.
- } - - if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { - // We're going to log this; build the string to log - b := &bytes.Buffer{} - slow := "" - if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { - slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) - } - fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) - if err != nil { // This HTTP request did not get a response from the service - fmt.Fprint(b, "REQUEST ERROR\n") - } else { - if logLevel == pipeline.LogError { - fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") - } else { - fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") - } - } - - pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) - if logLevel <= pipeline.LogError { - b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) - } - msg := b.String() - - if forceLog { - pipeline.ForceLog(logLevel, msg) - } - if shouldLog { - po.Log(logLevel, msg) - } - } - return response, err - } - }) -} - -// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret. -func RedactSigQueryParam(rawQuery string) (bool, string) { - rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= - sigFound := strings.Contains(rawQuery, "?sig=") - if !sigFound { - sigFound = strings.Contains(rawQuery, "&sig=") - if !sigFound { - return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) - } - } - // [?|&]sig= found, redact its value - values, _ := url.ParseQuery(rawQuery) - for name := range values { - if strings.EqualFold(name, "sig") { - values[name] = []string{"REDACTED"} - } - } - return sigFound, values.Encode() -} - -func prepareRequestForLogging(request pipeline.Request) *http.Request { - req := request - if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { - // Make copy so we don't destroy the query parameters we actually need to send in the request - req = request.Copy() - req.Request.URL.RawQuery = rawQuery - } - - return prepareRequestForServiceLogging(req) -} - -func stack() []byte { - buf := make([]byte, 1024) - for { - n := runtime.Stack(buf, false) - if n < len(buf) { - return buf[:n] - } - buf = make([]byte, 2*len(buf)) - } -} - -/////////////////////////////////////////////////////////////////////////////////////// -// Redact phase useful for blob and file service only. For other services, -// this method can directly return request.Request. 
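// A minimal sketch exercising RedactSigQueryParam above on a made-up SAS
// query string. Note the helper lowercases the query and re-encodes it via
// url.Values, so parameter order in the output follows Encode (sorted by key).
func exampleRedact() {
	found, q := RedactSigQueryParam("sv=2020-08-04&sig=topsecret&sp=r")
	fmt.Println(found, q) // prints: true sig=REDACTED&sp=r&sv=2020-08-04
}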
-/////////////////////////////////////////////////////////////////////////////////////// -func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { - req := request - if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { - req = request.Copy() - url, err := url.Parse(req.Header.Get(key)) - if err == nil { - if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound { - url.RawQuery = rawQuery - req.Header.Set(xMsCopySourceHeader, url.String()) - } - } - } - return req.Request -} - -const xMsCopySourceHeader = "x-ms-copy-source" - -func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { - for keyInHeader := range header { - if strings.EqualFold(keyInHeader, key) { - return true, keyInHeader - } - } - return false, "" -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go deleted file mode 100644 index 6286431a836..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go +++ /dev/null @@ -1,419 +0,0 @@ -package azblob - -import ( - "context" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "net/http" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. -type RetryPolicy int32 - -const ( - // RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy - RetryPolicyExponential RetryPolicy = 0 - - // RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy - RetryPolicyFixed RetryPolicy = 1 -) - -// RetryOptions configures the retry policy's behavior. -type RetryOptions struct { - // Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.\ - // A value of zero means that you accept our default policy. - Policy RetryPolicy - - // MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default). - // A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries. - MaxTries int32 - - // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. - // A value of zero means that you accept our default timeout. NOTE: When transferring large amounts - // of data, the default TryTimeout will probably not be sufficient. You should override this value - // based on the bandwidth available to the host machine and proximity to the Storage service. A good - // starting point may be something like (60 seconds per MB of anticipated-payload-size). - TryTimeout time.Duration - - // RetryDelay specifies the amount of delay to use before retrying an operation (0=default). - // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially - // with each retry up to a maximum specified by MaxRetryDelay. - // If you specify 0, then you must also specify 0 for MaxRetryDelay. - // If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be - // equal to or greater than RetryDelay. - RetryDelay time.Duration - - // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default). - // If you specify 0, then you must also specify 0 for RetryDelay. 
- MaxRetryDelay time.Duration
-
- // RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
- // If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
- // NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
- // data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
- RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
-}
-
-func (o RetryOptions) retryReadsFromSecondaryHost() string {
- return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
- //return "" // This is for non-blob SDKs
-}
-
-func (o RetryOptions) defaults() RetryOptions {
- // We assume the following:
- // 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
- // 2. o.MaxTries >= 0
- // 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
- // 4. o.RetryDelay <= o.MaxRetryDelay
- // 5. o.RetryDelay and o.MaxRetryDelay must both be 0 or both be non-0
-
- IfDefault := func(current *time.Duration, desired time.Duration) {
- if *current == time.Duration(0) {
- *current = desired
- }
- }
-
- // Set defaults if unspecified
- if o.MaxTries == 0 {
- o.MaxTries = 4
- }
- switch o.Policy {
- case RetryPolicyExponential:
- IfDefault(&o.TryTimeout, 1*time.Minute)
- IfDefault(&o.RetryDelay, 4*time.Second)
- IfDefault(&o.MaxRetryDelay, 120*time.Second)
-
- case RetryPolicyFixed:
- IfDefault(&o.TryTimeout, 1*time.Minute)
- IfDefault(&o.RetryDelay, 30*time.Second)
- IfDefault(&o.MaxRetryDelay, 120*time.Second)
- }
- return o
-}
-
-func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
- pow := func(number int64, exponent int32) int64 { // pow is nested helper function
- var result int64 = 1
- for n := int32(0); n < exponent; n++ {
- result *= number
- }
- return result
- }
-
- delay := time.Duration(0)
- switch o.Policy {
- case RetryPolicyExponential:
- delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
-
- case RetryPolicyFixed:
- if try > 1 { // Any try after the 1st uses the fixed delay
- delay = o.RetryDelay
- }
- }
-
- // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
- // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
- delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
- if delay > o.MaxRetryDelay {
- delay = o.MaxRetryDelay
- }
- return delay
-}
-
-// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
-func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
- o = o.defaults() // Force defaults to be calculated
- return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
- return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
- // Before each try, we'll select either the primary or secondary URL.
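// A minimal sketch of what calcDelay above produces for the default
// exponential policy (RetryDelay=4s, MaxRetryDelay=120s), before jitter:
// try 1 -> 0s, try 2 -> 4s, try 3 -> 12s, try 4 -> 28s; each value is then
// scaled by a random factor in [0.8, 1.3) and capped at MaxRetryDelay.
func printBackoffSchedule() {
	o := RetryOptions{Policy: RetryPolicyExponential}.defaults()
	for try := int32(1); try <= o.MaxTries; try++ {
		fmt.Printf("try %d: %v\n", try, o.calcDelay(try)) // jittered value; varies run to run
	}
}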
- primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
-
- // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
- considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
-
- // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
- // When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
- // If using a secondary:
- // Odd tries go against the primary; even tries go against the secondary
- // For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2))
- // If secondary gets a 404, don't fail, retry but future retries are only against the primary
- // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
- for try := int32(1); try <= o.MaxTries; try++ {
- logf("\n=====> Try=%d\n", try)
-
- // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
- tryingPrimary := !considerSecondary || (try%2 == 1)
- // Select the correct host and delay
- if tryingPrimary {
- primaryTry++
- delay := o.calcDelay(primaryTry)
- logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
- time.Sleep(delay) // The 1st try returns 0 delay
- } else {
- // For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
- delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
- logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
- time.Sleep(delay) // Delay with some jitter before trying secondary
- }
-
- // Clone the original request to ensure that each try starts with the original (unmutated) request.
- requestCopy := request.Copy()
-
- // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
- // the stream may not be at offset 0 when we first get it and we want the same behavior for the
- // 1st try as for additional tries.
- err = requestCopy.RewindBody()
- if err != nil {
- return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
- }
-
- if !tryingPrimary {
- requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
- requestCopy.Host = o.retryReadsFromSecondaryHost()
- }
-
- // Set the server-side timeout query parameter "timeout=[seconds]"
- timeout := o.TryTimeout // Max time per try
- if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
- t := deadline.Sub(time.Now()) // Duration from now until user's ctx reaches its deadline
- logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", int32(timeout.Seconds()), int32(t.Seconds()))
- if t < timeout {
- timeout = t
- }
- if timeout < 0 {
- timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
- }
- logf("TryTimeout adjusted to=%d sec\n", int32(timeout.Seconds()))
- }
- q := requestCopy.Request.URL.Query()
- q.Set("timeout", strconv.Itoa(int(timeout.Seconds()+1))) // Add 1 to "round up"
- requestCopy.Request.URL.RawQuery = q.Encode()
- logf("Url=%s\n", requestCopy.Request.URL.String())
-
- // Set the time for this particular retry operation and then Do the operation.
- tryCtx, tryCancel := context.WithTimeout(ctx, timeout)
- //requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
- response, err = next.Do(tryCtx, requestCopy) // Make the request
- /*err = improveDeadlineExceeded(err)
- if err == nil {
- response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
- }*/
- logf("Err=%v, response=%v\n", err, response)
-
- action := "" // This MUST get changed within the switch code below
- switch {
- case ctx.Err() != nil:
- action = "NoRetry: Op timeout"
- case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
- // If attempt was against the secondary & it returned a StatusNotFound (404), then
- // the resource was not found. This may be due to replication delay. So, in this
- // case, we'll never try the secondary again for this operation.
- considerSecondary = false
- action = "Retry: Secondary URL returned 404"
- case err != nil:
- // NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
- // Use ServiceCode to verify if the error is related to the storage service side;
- // ServiceCode is set only when an error related to the storage service happened.
- if stErr, ok := err.(StorageError); ok {
- if stErr.Temporary() {
- action = "Retry: StorageError with error service code and Temporary()"
- } else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove this after the protocol layer fixes the issue that net.Error is wrapped as storageError
- action = "Retry: StorageError with success status code"
- } else {
- action = "NoRetry: StorageError not Temporary() and without retriable status code"
- }
- } else if netErr, ok := err.(net.Error); ok {
- // Use the non-retriable net.Error list rather than a retriable list.
- // There are errors without a Temporary() implementation that still need to be
- // retried, like 'connection reset by peer', 'transport connection broken', etc.
- // So the SDK does retry in most cases, unless the error should definitely not be retried.
- if !isNotRetriable(netErr) {
- action = "Retry: net.Error and not in the non-retriable list"
- } else {
- action = "NoRetry: net.Error and in the non-retriable list"
- }
- } else if err == io.ErrUnexpectedEOF {
- action = "Retry: unexpected EOF"
- } else {
- action = "NoRetry: unrecognized error"
- }
- default:
- action = "NoRetry: successful HTTP request" // no error
- }
-
- logf("Action=%s\n", action)
- // fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
- if action[0] != 'R' { // Retry only if action starts with 'R'
- if err != nil {
- tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
- } else {
- // We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
- // So, when the user closes the Body, then our per-try context gets closed too.
- // Another option is for the last policy to do this wrapping for a per-retry context (not for the user's context)
- if response == nil || response.Response() == nil {
- // We return an error in the case response or response.Response() is nil,
- // as for the client, the response should not be nil if the request is sent and the operation is executed successfully.
- // Another option, is that execute the cancel function when response or response.Response() is nil, - // as in this case, current per-try has nothing to do in future. - return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully") - } - if response.Response().Body == http.NoBody { - // If the response is empty the caller isn't obligated to call close - tryCancel(); - } else { - response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body} - } - } - break // Don't retry - } - if response != nil && response.Response() != nil && response.Response().Body != nil { - // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - body := response.Response().Body - io.Copy(ioutil.Discard, body) - body.Close() - } - // If retrying, cancel the current per-try timeout context - tryCancel() - } - return response, err // Not retryable or too many retries; return the last response/error - } - }) -} - -// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed. -type contextCancelReadCloser struct { - cf context.CancelFunc - body io.ReadCloser -} - -func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { - return rc.body.Read(p) -} - -func (rc *contextCancelReadCloser) Close() error { - err := rc.body.Close() - if rc.cf != nil { - rc.cf() - } - return err -} - -// isNotRetriable checks if the provided net.Error isn't retriable. -func isNotRetriable(errToParse net.Error) bool { - // No error, so this is NOT retriable. - if errToParse == nil { - return true - } - - // The error is either temporary or a timeout so it IS retriable (not not retriable). - if errToParse.Temporary() || errToParse.Timeout() { - return false - } - - genericErr := error(errToParse) - - // From here all the error are neither Temporary() nor Timeout(). - switch err := errToParse.(type) { - case *net.OpError: - // The net.Error is also a net.OpError but the inner error is nil, so this is not retriable. - if err.Err == nil { - return true - } - genericErr = err.Err - } - - switch genericErr.(type) { - case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError: - // If the error is one of the ones listed, then it is NOT retriable. - return true - } - - // If it's invalid header field name/value error thrown by http module, then it is NOT retriable. - // This could happen when metadata's key or value is invalid. (RoundTrip in transport.go) - if strings.Contains(genericErr.Error(), "invalid header field") { - return true - } - - // Assume the error is retriable. 
- return false -} - -var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} - -func isSuccessStatusCode(resp *http.Response) bool { - if resp == nil { - return false - } - for _, i := range successStatusCodes { - if i == resp.StatusCode { - return true - } - } - return false -} - -// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away -var logf = func(format string, a ...interface{}) {} - -// Use this version to see the retry method's code path (import "fmt") -//var logf = fmt.Printf - -/* -type deadlineExceededReadCloser struct { - r io.ReadCloser -} - -func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { - n, err := 0, io.EOF - if r.r != nil { - n, err = r.r.Read(p) - } - return n, improveDeadlineExceeded(err) -} -func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { - // For an HTTP request, the ReadCloser MUST also implement seek - // For an HTTP response, Seek MUST not be called (or this will panic) - o, err := r.r.(io.Seeker).Seek(offset, whence) - return o, improveDeadlineExceeded(err) -} -func (r *deadlineExceededReadCloser) Close() error { - if c, ok := r.r.(io.Closer); ok { - c.Close() - } - return nil -} - -// timeoutError is the internal struct that implements our richer timeout error. -type deadlineExceeded struct { - responseError -} - -var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time - -// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. -func improveDeadlineExceeded(cause error) error { - // If cause is not DeadlineExceeded, return the same error passed in. - if cause != context.DeadlineExceeded { - return cause - } - // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message - return &deadlineExceeded{ - responseError: responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - }, - } -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *deadlineExceeded) Error() string { - return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") -} -*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go deleted file mode 100644 index 608e1051ca0..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go +++ /dev/null @@ -1,51 +0,0 @@ -package azblob - -import ( - "bytes" - "context" - "fmt" - "os" - "runtime" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// TelemetryOptions configures the telemetry policy's behavior. -type TelemetryOptions struct { - // Value is a string prepended to each request's User-Agent and sent to the service. - // The service records the user-agent in logs for diagnostics and tracking of client requests. - Value string -} - -// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects -// which add telemetry information to outgoing HTTP requests. 
-func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { - b := &bytes.Buffer{} - b.WriteString(o.Value) - if b.Len() > 0 { - b.WriteRune(' ') - } - fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) - telemetryValue := b.String() - - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - request.Header.Set("User-Agent", telemetryValue) - return next.Do(ctx, request) - } - }) -} - -// NOTE: the ONLY function that should write to this variable is this func -var platformInfo = func() string { - // Azure-Storage/version (runtime; os type and version)” - // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)' - operatingSystem := runtime.GOOS // Default OS string - switch operatingSystem { - case "windows": - operatingSystem = os.Getenv("OS") // Get more specific OS information - case "linux": // accept default OS info - case "freebsd": // accept default OS info - } - return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) -}() diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go deleted file mode 100644 index 1f7817d2df6..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go +++ /dev/null @@ -1,36 +0,0 @@ -package azblob - -import ( - "context" - "errors" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object -// that sets the request's x-ms-client-request-id header if it doesn't already exist. -func NewUniqueRequestIDPolicyFactory() pipeline.Factory { - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - // This is Policy's Do method: - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - id := request.Header.Get(xMsClientRequestID) - if id == "" { // Add a unique request ID if the caller didn't specify one already - id = newUUID().String() - request.Header.Set(xMsClientRequestID, id) - } - - resp, err := next.Do(ctx, request) - - if err == nil && resp != nil { - crId := resp.Response().Header.Get(xMsClientRequestID) - if crId != "" && crId != id { - err = errors.New("client Request ID from request and response does not match") - } - } - - return resp, err - } - }) -} - -const xMsClientRequestID = "x-ms-client-request-id" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go deleted file mode 100644 index 6b84d95e3ed..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go +++ /dev/null @@ -1,244 +0,0 @@ -package azblob - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" -) - -// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. 
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
-type AccountSASSignatureValues struct {
-	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
-	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
-	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
-	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
-	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
-	IPRange       IPRange     `param:"sip"`
-	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
-	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
-}
-
-// NewSASQueryParameters uses an account's shared key credential to sign these signature values to produce
-// the proper SAS query parameters.
-func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
-	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
-	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
-		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
-	}
-	if v.Version == "" {
-		v.Version = SASVersion
-	}
-	perms := &AccountSASPermissions{}
-	if err := perms.Parse(v.Permissions); err != nil {
-		return SASQueryParameters{}, err
-	}
-	v.Permissions = perms.String()
-
-	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
-
-	stringToSign := strings.Join([]string{
-		sharedKeyCredential.AccountName(),
-		v.Permissions,
-		v.Services,
-		v.ResourceTypes,
-		startTime,
-		expiryTime,
-		v.IPRange.String(),
-		string(v.Protocol),
-		v.Version,
-		""}, // That's right, the account SAS requires a terminating extra newline
-		"\n")
-
-	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
-	p := SASQueryParameters{
-		// Common SAS parameters
-		version:     v.Version,
-		protocol:    v.Protocol,
-		startTime:   v.StartTime,
-		expiryTime:  v.ExpiryTime,
-		permissions: v.Permissions,
-		ipRange:     v.IPRange,
-
-		// Account-specific SAS parameters
-		services:      v.Services,
-		resourceTypes: v.ResourceTypes,
-
-		// Calculated SAS signature
-		signature: signature,
-	}
-
-	return p, nil
-}
-
-// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
-// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
-type AccountSASPermissions struct {
-	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags, PermanentDelete, Immutability bool
-}
-
-// String produces the SAS permissions string for an Azure Storage account.
-// Call this method to set AccountSASSignatureValues's Permissions field.
-func (p AccountSASPermissions) String() string { - var buffer bytes.Buffer - if p.Read { - buffer.WriteRune('r') - } - if p.Write { - buffer.WriteRune('w') - } - if p.Delete { - buffer.WriteRune('d') - } - if p.DeletePreviousVersion { - buffer.WriteRune('x') - } - if p.List { - buffer.WriteRune('l') - } - if p.Add { - buffer.WriteRune('a') - } - if p.Create { - buffer.WriteRune('c') - } - if p.Update { - buffer.WriteRune('u') - } - if p.Process { - buffer.WriteRune('p') - } - if p.Tag { - buffer.WriteRune('t') - } - if p.FilterByTags { - buffer.WriteRune('f') - } - if p.PermanentDelete { - buffer.WriteRune('y') - } - if p.Immutability { - buffer.WriteRune('i') - } - return buffer.String() -} - -// Parse initializes the AccountSASPermissions's fields from a string. -func (p *AccountSASPermissions) Parse(s string) error { - *p = AccountSASPermissions{} // Clear out the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'l': - p.List = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'u': - p.Update = true - case 'p': - p.Process = true - case 'x': - p.Process = true - case 't': - p.Tag = true - case 'f': - p.FilterByTags = true - case 'y': - p.PermanentDelete = true - case 'i': - p.Immutability = true - default: - return fmt.Errorf("invalid permission character: '%v'", r) - } - } - return nil -} - -// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountSASServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. -func (s AccountSASServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -func (a *AccountSASServices) Parse(s string) error { - *a = AccountSASServices{} // Clear out the flags - for _, r := range s { - switch r { - case 'b': - a.Blob = true - case 'q': - a.Queue = true - case 'f': - a.File = true - default: - return fmt.Errorf("Invalid service character: '%v'", r) - } - } - return nil -} - -// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. -type AccountSASResourceTypes struct { - Service, Container, Object bool -} - -// String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. -func (rt AccountSASResourceTypes) String() string { - var buffer bytes.Buffer - if rt.Service { - buffer.WriteRune('s') - } - if rt.Container { - buffer.WriteRune('c') - } - if rt.Object { - buffer.WriteRune('o') - } - return buffer.String() -} - -// Parse initializes the AccountSASResourceType's fields from a string. 
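Tying these helper types together, a small self-contained sketch of generating an account SAS with the signature values and permission/service/resource-type builders defined above; the account name and key are placeholder values.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder credentials; the key must be base64, as the service issues it.
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	sasValues := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}
	qp, err := sasValues.NewSASQueryParameters(cred)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("https://myaccount.blob.core.windows.net/?%s\n", qp.Encode())
}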
-func (rt *AccountSASResourceTypes) Parse(s string) error {
-	*rt = AccountSASResourceTypes{} // Clear out the flags
-	for _, r := range s {
-		switch r {
-		case 's':
-			rt.Service = true
-		case 'c':
-			rt.Container = true
-		case 'o':
-			rt.Object = true
-		default:
-			return fmt.Errorf("Invalid resource type: '%v'", r)
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
deleted file mode 100644
index bef67624caf..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
+++ /dev/null
@@ -1,393 +0,0 @@
-package azblob
-
-import (
-	"errors"
-	"net"
-	"net/url"
-	"strings"
-	"time"
-)
-
-// SASVersion indicates the SAS version.
-const SASVersion = ServiceVersion
-
-type SASProtocol string
-
-const (
-	// SASProtocolHTTPS can be specified for a SAS protocol
-	SASProtocolHTTPS SASProtocol = "https"
-
-	// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
-	SASProtocolHTTPSandHTTP SASProtocol = "https,http"
-)
-
-// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
-// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
-func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
-	ss := ""
-	if !startTime.IsZero() {
-		ss = formatSASTimeWithDefaultFormat(&startTime)
-	}
-	se := ""
-	if !expiryTime.IsZero() {
-		se = formatSASTimeWithDefaultFormat(&expiryTime)
-	}
-	sh := ""
-	if !snapshotTime.IsZero() {
-		sh = snapshotTime.Format(SnapshotTimeFormat)
-	}
-	return ss, se, sh
-}
-
-// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
-const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
-var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
-
-// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
-func formatSASTimeWithDefaultFormat(t *time.Time) string {
-	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
-}
-
-// formatSASTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
-func formatSASTime(t *time.Time, format string) string {
-	if format != "" {
-		return t.Format(format)
-	}
-	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
-}
-
-// parseSASTimeString tries to parse a SAS time string.
-func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
-	for _, sasTimeFormat := range SASTimeFormats {
-		t, err = time.Parse(sasTimeFormat, val)
-		if err == nil {
-			timeFormat = sasTimeFormat
-			break
-		}
-	}
-
-	if err != nil {
-		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
-	}
-
-	return
-}
-
-// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-
-// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
-// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
-// to a query parameter map by calling AddToValues().
-// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. -// -// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). -type SASQueryParameters struct { - // All members are immutable or values so copies of this struct are goroutine-safe. - version string `param:"sv"` - services string `param:"ss"` - resourceTypes string `param:"srt"` - protocol SASProtocol `param:"spr"` - startTime time.Time `param:"st"` - expiryTime time.Time `param:"se"` - snapshotTime time.Time `param:"snapshot"` - ipRange IPRange `param:"sip"` - identifier string `param:"si"` - resource string `param:"sr"` - permissions string `param:"sp"` - signature string `param:"sig"` - cacheControl string `param:"rscc"` - contentDisposition string `param:"rscd"` - contentEncoding string `param:"rsce"` - contentLanguage string `param:"rscl"` - contentType string `param:"rsct"` - signedOid string `param:"skoid"` - signedTid string `param:"sktid"` - signedStart time.Time `param:"skt"` - signedService string `param:"sks"` - signedExpiry time.Time `param:"ske"` - signedVersion string `param:"skv"` - signedDirectoryDepth string `param:"sdd"` - preauthorizedAgentObjectId string `param:"saoid"` - agentObjectId string `param:"suoid"` - correlationId string `param:"scid"` - // private member used for startTime and expiryTime formatting. - stTimeFormat string - seTimeFormat string -} - -func (p *SASQueryParameters) PreauthorizedAgentObjectId() string { - return p.preauthorizedAgentObjectId -} - -func (p *SASQueryParameters) AgentObjectId() string { - return p.agentObjectId -} - -func (p *SASQueryParameters) SignedCorrelationId() string { - return p.correlationId -} - -func (p *SASQueryParameters) SignedTid() string { - return p.signedTid -} - -func (p *SASQueryParameters) SignedStart() time.Time { - return p.signedStart -} - -func (p *SASQueryParameters) SignedExpiry() time.Time { - return p.signedExpiry -} - -func (p *SASQueryParameters) SignedService() string { - return p.signedService -} - -func (p *SASQueryParameters) SignedVersion() string { - return p.signedVersion -} - -func (p *SASQueryParameters) SnapshotTime() time.Time { - return p.snapshotTime -} - -func (p *SASQueryParameters) Version() string { - return p.version -} - -func (p *SASQueryParameters) Services() string { - return p.services -} -func (p *SASQueryParameters) ResourceTypes() string { - return p.resourceTypes -} -func (p *SASQueryParameters) Protocol() SASProtocol { - return p.protocol -} -func (p *SASQueryParameters) StartTime() time.Time { - return p.startTime -} -func (p *SASQueryParameters) ExpiryTime() time.Time { - return p.expiryTime -} - -func (p *SASQueryParameters) IPRange() IPRange { - return p.ipRange -} - -func (p *SASQueryParameters) Identifier() string { - return p.identifier -} - -func (p *SASQueryParameters) Resource() string { - return p.resource -} -func (p *SASQueryParameters) Permissions() string { - return p.permissions -} - -func (p *SASQueryParameters) Signature() string { - return p.signature -} - -func (p *SASQueryParameters) CacheControl() string { - return p.cacheControl -} - -func (p *SASQueryParameters) ContentDisposition() string { - return p.contentDisposition -} - -func (p *SASQueryParameters) ContentEncoding() string { - return p.contentEncoding -} - -func (p *SASQueryParameters) ContentLanguage() string { - return p.contentLanguage -} - -func (p *SASQueryParameters) ContentType() string { - return p.contentType -} - -func (p 
*SASQueryParameters) SignedDirectoryDepth() string { - return p.signedDirectoryDepth -} - -// IPRange represents a SAS IP range's start IP and (optionally) end IP. -type IPRange struct { - Start net.IP // Not specified if length = 0 - End net.IP // Not specified if length = 0 -} - -// String returns a string representation of an IPRange. -func (ipr *IPRange) String() string { - if len(ipr.Start) == 0 { - return "" - } - start := ipr.Start.String() - if len(ipr.End) == 0 { - return start - } - return start + "-" + ipr.End.String() -} - -// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the -// query parameter map's passed-in values. If deleteSASParametersFromValues is true, -// all SAS-related query parameters are removed from the passed-in map. If -// deleteSASParametersFromValues is false, the map passed-in map is unaltered. -func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { - p := SASQueryParameters{} - for k, v := range values { - val := v[0] - isSASKey := true - switch strings.ToLower(k) { - case "sv": - p.version = val - case "ss": - p.services = val - case "srt": - p.resourceTypes = val - case "spr": - p.protocol = SASProtocol(val) - case "snapshot": - p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) - case "st": - p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) - case "se": - p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) - case "sip": - dashIndex := strings.Index(val, "-") - if dashIndex == -1 { - p.ipRange.Start = net.ParseIP(val) - } else { - p.ipRange.Start = net.ParseIP(val[:dashIndex]) - p.ipRange.End = net.ParseIP(val[dashIndex+1:]) - } - case "si": - p.identifier = val - case "sr": - p.resource = val - case "sp": - p.permissions = val - case "sig": - p.signature = val - case "rscc": - p.cacheControl = val - case "rscd": - p.contentDisposition = val - case "rsce": - p.contentEncoding = val - case "rscl": - p.contentLanguage = val - case "rsct": - p.contentType = val - case "skoid": - p.signedOid = val - case "sktid": - p.signedTid = val - case "skt": - p.signedStart, _ = time.Parse(SASTimeFormat, val) - case "ske": - p.signedExpiry, _ = time.Parse(SASTimeFormat, val) - case "sks": - p.signedService = val - case "skv": - p.signedVersion = val - case "sdd": - p.signedDirectoryDepth = val - case "saoid": - p.preauthorizedAgentObjectId = val - case "suoid": - p.agentObjectId = val - case "scid": - p.correlationId = val - default: - isSASKey = false // We didn't recognize the query parameter - } - if isSASKey && deleteSASParametersFromValues { - delete(values, k) - } - } - return p -} - -// AddToValues adds the SAS components to the specified query parameters map. 
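A short sketch of round-tripping these SAS components from a URL. The unexported newSASQueryParameters above does the parsing; the assumption here is that external callers reach it through azblob.NewBlobURLParts, which exposes the result via its SAS field (defined elsewhere in this package). The URL and signature are placeholders.

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	raw := "https://myaccount.blob.core.windows.net/cnt/blob?sv=2019-12-12&sp=r&sr=b&sig=placeholder"
	u, err := url.Parse(raw)
	if err != nil {
		log.Fatal(err)
	}
	parts := azblob.NewBlobURLParts(*u) // assumed public wrapper around the parsing shown above
	fmt.Println("version:", parts.SAS.Version())
	fmt.Println("permissions:", parts.SAS.Permissions())
	fmt.Println("re-encoded:", parts.SAS.Encode()) // URL-encoded, sorted by key
}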
-func (p *SASQueryParameters) addToValues(v url.Values) url.Values { - if p.version != "" { - v.Add("sv", p.version) - } - if p.services != "" { - v.Add("ss", p.services) - } - if p.resourceTypes != "" { - v.Add("srt", p.resourceTypes) - } - if p.protocol != "" { - v.Add("spr", string(p.protocol)) - } - if !p.startTime.IsZero() { - v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) - } - if !p.expiryTime.IsZero() { - v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) - } - if len(p.ipRange.Start) > 0 { - v.Add("sip", p.ipRange.String()) - } - if p.identifier != "" { - v.Add("si", p.identifier) - } - if p.resource != "" { - v.Add("sr", p.resource) - } - if p.permissions != "" { - v.Add("sp", p.permissions) - } - if p.signedOid != "" { - v.Add("skoid", p.signedOid) - v.Add("sktid", p.signedTid) - v.Add("skt", p.signedStart.Format(SASTimeFormat)) - v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) - v.Add("sks", p.signedService) - v.Add("skv", p.signedVersion) - } - if p.signature != "" { - v.Add("sig", p.signature) - } - if p.cacheControl != "" { - v.Add("rscc", p.cacheControl) - } - if p.contentDisposition != "" { - v.Add("rscd", p.contentDisposition) - } - if p.contentEncoding != "" { - v.Add("rsce", p.contentEncoding) - } - if p.contentLanguage != "" { - v.Add("rscl", p.contentLanguage) - } - if p.contentType != "" { - v.Add("rsct", p.contentType) - } - if p.signedDirectoryDepth != "" { - v.Add("sdd", p.signedDirectoryDepth) - } - if p.preauthorizedAgentObjectId != "" { - v.Add("saoid", p.preauthorizedAgentObjectId) - } - if p.agentObjectId != "" { - v.Add("suoid", p.agentObjectId) - } - if p.correlationId != "" { - v.Add("scid", p.correlationId) - } - return v -} - -// Encode encodes the SAS query parameters into URL encoded form sorted by key. -func (p *SASQueryParameters) Encode() string { - v := url.Values{} - p.addToValues(v) - return v.Encode() -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go deleted file mode 100644 index d09ddcffcc6..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go +++ /dev/null @@ -1,134 +0,0 @@ -package azblob - -// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes - -const ( - // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. - ServiceCodeNone ServiceCodeType = "" - - // ServiceCodeAccountAlreadyExists means the specified account already exists. - ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" - - // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). - ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" - - // ServiceCodeAccountIsDisabled means the specified account is disabled (403). - ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" - - // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). - ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" - - // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). 
- ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" - - // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). - ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" - - // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). - ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" - - // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). - ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" - - // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). - ServiceCodeInternalError ServiceCodeType = "InternalError" - - // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). - ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" - - // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). - ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" - - // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). - ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" - - // ServiceCodeInvalidInput means one of the request inputs is not valid (400). - ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" - - // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). - ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" - - // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). - ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" - - // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). - ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" - - // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). - ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" - - // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). - ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" - - // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). - ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" - - // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). - ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" - - // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). - ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" - - // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). 
- ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" - - // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). - ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" - - // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). - ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" - - // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). - ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" - - // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). - ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" - - // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). - ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" - - // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). - ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" - - // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). - ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" - - // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). - ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" - - // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). - ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" - - // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). - ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" - - // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). - ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" - - // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). - ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" - - // ServiceCodeResourceAlreadyExists means the specified resource already exists (409). - ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" - - // ServiceCodeResourceNotFound means the specified resource does not exist (404). - ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" - - // ServiceCodeNoAuthenticationInformation means the specified authentication for the resource does not exist (401). - ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation" - - // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). - ServiceCodeServerBusy ServiceCodeType = "ServerBusy" - - // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). - ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" - - // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). 
- ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" - - // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). - ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" - - // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). - ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" -) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go deleted file mode 100644 index a3cbd9817bf..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go +++ /dev/null @@ -1,111 +0,0 @@ -package azblob - -import ( - "bytes" - "encoding/xml" - "fmt" - "net/http" - "sort" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -func init() { - // wire up our custom error handling constructor - responseErrorFactory = newStorageError -} - -// ServiceCodeType is a string identifying a storage service error. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2 -type ServiceCodeType string - -// StorageError identifies a responder-generated network or response parsing error. -type StorageError interface { - // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). - ResponseError - - // ServiceCode returns a service error code. Your code can use this to make error recovery decisions. - ServiceCode() ServiceCodeType -} - -// storageError is the internal struct that implements the public StorageError interface. -type storageError struct { - responseError - serviceCode ServiceCodeType - details map[string]string -} - -// newStorageError creates an error object that implements the error interface. -func newStorageError(cause error, response *http.Response, description string) error { - return &storageError{ - responseError: responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - response: response, - description: description, - }, - serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), - } -} - -// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. -func (e *storageError) ServiceCode() ServiceCodeType { - return e.serviceCode -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *storageError) Error() string { - b := &bytes.Buffer{} - fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) - fmt.Fprintf(b, "Description=%s, Details: ", e.description) - if len(e.details) == 0 { - b.WriteString("(none)\n") - } else { - b.WriteRune('\n') - keys := make([]string, 0, len(e.details)) - // Alphabetize the details - for k := range e.details { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) - } - } - req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request - pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) - return e.ErrorNode.Error(b.String()) -} - -// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). 
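A hedged sketch of how these service codes and the StorageError interface above combine in caller code; err is assumed to be the error returned by some preceding XxxURL operation, and every identifier used here is defined in the deleted files above.

// Illustrative only: err comes from an earlier blob call.
if stgErr, ok := err.(azblob.StorageError); ok {
	switch stgErr.ServiceCode() {
	case azblob.ServiceCodeResourceNotFound:
		// 404: create the resource, or surface the miss to the caller.
	case azblob.ServiceCodeServerBusy, azblob.ServiceCodeOperationTimedOut:
		if stgErr.Temporary() { // 500/503 (and 502) report Temporary() == true
			// Safe to retry; the retry policy earlier in this patch does this automatically.
		}
	default:
		// stgErr.Error() renders the full request/response diagnostic dump.
	}
}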
-func (e *storageError) Temporary() bool {
-	if e.response != nil {
-		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
-			return true
-		}
-	}
-	return e.ErrorNode.Temporary()
-}
-
-// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
-func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
-	tokName := ""
-	var t xml.Token
-	for t, err = d.Token(); err == nil; t, err = d.Token() {
-		switch tt := t.(type) {
-		case xml.StartElement:
-			tokName = tt.Name.Local
-			break
-		case xml.CharData:
-			switch tokName {
-			case "Message":
-				e.description = string(tt)
-			default:
-				if e.details == nil {
-					e.details = map[string]string{}
-				}
-				e.details[tokName] = string(tt)
-			}
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
deleted file mode 100644
index d7b2507e43f..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package azblob
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-)
-
-// httpRange defines a range of bytes within an HTTP resource, starting at offset and
-// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
-// which has a non-zero offset but a zero count indicates from the offset to the resource's end.
-type httpRange struct {
-	offset int64
-	count  int64
-}
-
-func (r httpRange) pointers() *string {
-	if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
-		return nil // No specified range
-	}
-	endOffset := "" // if count == CountToEnd (0)
-	if r.count > 0 {
-		endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
-	}
-	dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
-	return &dataRange
-}
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
-	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
-		return 0, nil
-	}
-
-	err := validateSeekableStreamAt0(body)
-	if err != nil {
-		return 0, err
-	}
-
-	count, err := body.Seek(0, io.SeekEnd)
-	if err != nil {
-		return 0, errors.New("body stream must be seekable")
-	}
-
-	body.Seek(0, io.SeekStart)
-	return count, nil
-}
-
-// return an error if body is not a valid seekable stream at 0
-func validateSeekableStreamAt0(body io.ReadSeeker) error {
-	if body == nil { // nil bodies are "logically" seekable to 0
-		return nil
-	}
-	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
-		// Help detect programmer error
-		if err != nil {
-			return errors.New("body stream must be seekable")
-		}
-		return errors.New("body stream must be set to position 0")
-	}
-	return nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
deleted file mode 100644
index 66799f9cb65..00000000000
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package azblob
-
-import (
-	"crypto/rand"
-	"fmt"
-	"strconv"
-)
-
-// The UUID reserved variants.
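As a worked example of pointers() in zc_util_validate.go above, an in-package fragment showing the Range header value each httpRange yields; it assumes CountToEnd == 0, which is how the zero count is treated in that function (its definition is not part of this hunk).

// In-package sketch; prints the Range header value each httpRange produces.
for _, r := range []httpRange{
	{},                         // zero value: nil, i.e. no Range header (entire resource)
	{offset: 256},              // "bytes=256-"      (offset to end of resource)
	{offset: 0, count: 512},    // "bytes=0-511"
	{offset: 1024, count: 512}, // "bytes=1024-1535" (offset+count-1)
} {
	if p := r.pointers(); p != nil {
		fmt.Println(*p)
	}
}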
-const ( - reservedNCS byte = 0x80 - reservedRFC4122 byte = 0x40 - reservedMicrosoft byte = 0x20 - reservedFuture byte = 0x00 -) - -// A UUID representation compliant with specification in RFC 4122 document. -type uuid [16]byte - -// NewUUID returns a new uuid using RFC 4122 algorithm. -func newUUID() (u uuid) { - u = uuid{} - // Set all bits to randomly (or pseudo-randomly) chosen values. - rand.Read(u[:]) - u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) - - var version byte = 4 - u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) - return -} - -// String returns an unparsed version of the generated UUID sequence. -func (u uuid) String() string { - return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" -// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. -func parseUUID(uuidStr string) uuid { - char := func(hexString string) byte { - i, _ := strconv.ParseUint(hexString, 16, 8) - return byte(i) - } - if uuidStr[0] == '{' { - uuidStr = uuidStr[1:] // Skip over the '{' - } - // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f - // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33 - // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45 - uuidVal := uuid{ - char(uuidStr[0:2]), - char(uuidStr[2:4]), - char(uuidStr[4:6]), - char(uuidStr[6:8]), - - char(uuidStr[9:11]), - char(uuidStr[11:13]), - - char(uuidStr[14:16]), - char(uuidStr[16:18]), - - char(uuidStr[19:21]), - char(uuidStr[21:23]), - - char(uuidStr[24:26]), - char(uuidStr[26:28]), - char(uuidStr[28:30]), - char(uuidStr[30:32]), - char(uuidStr[32:34]), - char(uuidStr[34:36]), - } - return uuidVal -} - -func (u uuid) bytes() []byte { - return u[:] -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go deleted file mode 100644 index 6b3779c0e98..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2017 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -/* -Package azblob allows you to manipulate Azure Storage containers and blobs objects. - -URL Types - -The most common types you'll work with are the XxxURL types. The methods of these types make requests -against the Azure Storage Service. - - - ServiceURL's methods perform operations on a storage account. - - ContainerURL's methods perform operations on an account's container. - - BlockBlobURL's methods perform operations on a container's block blob. - - AppendBlobURL's methods perform operations on a container's append blob. - - PageBlobURL's methods perform operations on a container's page blob. - - BlobURL's methods perform operations on a container's blob regardless of the blob's type. - -Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP -request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed. -The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. - -Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass -an initial pipeline. 
When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
-URL but it shares the same pipeline as the parent ServiceURL object.
-
-To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
-To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
-respectively. These three types are all identical except for the methods they expose; each type exposes the methods
-relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
-this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
-the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
-can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
-
-If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
-object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
-with the same URL as the original but with the specified pipeline.
-
-Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that
-XxxURL objects share a lot of system resources making them very efficient.
-
-All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
-transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
-example of how to deal with errors.
-
-URL and Shared Access Signature Manipulation
-
-The library includes a BlobURLParts type for deconstructing and reconstructing URLs. And you can use the following types
-for generating and parsing Shared Access Signatures (SAS):
- - Use the AccountSASSignatureValues type to create a SAS for a storage account.
- - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
- - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
-
-To generate a SAS, you must use the SharedKeyCredential type.
-
-Credentials
-
-When creating a request pipeline, you must specify one of this package's credential types.
- - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
- - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
-   to generate Shared Access Signatures.
-
-HTTP Request Policy Factories
-
-This package defines several request policy factories for use with the pipeline package.
-Most applications will not use these factories directly; instead, the NewPipeline
-function creates these factories, initializes them (via the PipelineOptions type)
-and returns a pipeline object for use by the XxxURL objects.
-
-However, for advanced scenarios, developers can access these policy factories directly
-and even create their own and then construct their own pipeline in order to affect HTTP
-requests and responses performed by the XxxURL objects. For example, developers can
-introduce their own logging, random failures, request recording & playback for fast
-testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
-possibilities are endless!
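Following the URL-types description above, a minimal sketch of the ServiceURL -> ContainerURL -> BlockBlobURL chain sharing one pipeline; the account name, key, and resource names are placeholders.

package main

import (
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account name/key.
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}
	p := azblob.NewPipeline(cred, azblob.PipelineOptions{}) // retry/log/telemetry policies wired in here
	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	service := azblob.NewServiceURL(*u, p)               // account-level operations
	container := service.NewContainerURL("mycontainer") // shares the parent's pipeline
	blob := container.NewBlockBlobURL("myblob.txt")     // ditto; block-blob method set
	_ = blob
}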
- -Below are the request pipeline policy factory functions that are provided with this -package: - - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests. - - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures. - - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests. - - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures. - -Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. -*/ -package azblob - -// TokenCredential Use this to access resources using Role-Based Access Control (RBAC). diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go deleted file mode 100644 index 9a0144bf5f4..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go +++ /dev/null @@ -1,532 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/base64" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// appendBlobClient is the client for the AppendBlob methods of the Azblob service. -type appendBlobClient struct { - managementClient -} - -// newAppendBlobClient creates an instance of the appendBlobClient client. -func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient { - return appendBlobClient{newManagementClient(url, p)} -} - -// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append -// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is -// supported only on version 2015-02-21 version or later. -// -// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an -// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting -// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to -// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be -// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and -// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If -// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the -// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - -// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A -// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this -// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - -// Precondition Failed). 
encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in -// the request. If not specified, encryption is performed with the root account encryption key. For more information, -// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided -// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm -// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the -// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the -// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is -// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage -// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the -// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been -// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching -// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a -// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) - if err != nil { - return nil, err - } - return resp.(*AppendBlobAppendBlockResponse), err -} - -// appendBlockPreparer prepares the AppendBlock request. 
-func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "appendblock") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if transactionalContentCrc64 != nil { - req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if maxSize != nil { - req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) - } - if appendPosition != nil { - req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// appendBlockResponder handles the response to the AppendBlock request. -func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err -} - -// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob -// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created -// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. -// -// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of -// source data in the specified range. 
sourceContentMD5 is specify the md5 calculated for the range of bytes that must -// be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the range of bytes that must be -// read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to -// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data -// provided in the request. If not specified, encryption is performed with the root account encryption key. For more -// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the -// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the -// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided -// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies -// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is -// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage -// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this -// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append -// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value -// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - -// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A -// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this -// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - -// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified -// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has -// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a -// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is -// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is -// specify this header value to operate only on a blob if it has been modified since the specified date/time. -// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. -// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides -// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. copySourceAuthorization is only Bearer type is supported. Credentials should be a -// valid OAuth access token to copy source. 
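As a usage sketch before the implementation that follows: the generated signature is positional, so callers pass nil for every optional pointer they do not need and the None value for each enum. Every name here (client, ctx, srcURL, n) is a hypothetical placeholder; real code normally reaches this internal client through the package's exported wrapper types.

    // Append a block from a pre-authorized source URL, accepting the
    // service defaults for every optional parameter (n is the content length).
    resp, err := client.AppendBlockFromURL(ctx, srcURL, n,
        nil, nil, nil, nil, nil, // sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5
        nil, nil, EncryptionAlgorithmNone, nil, // customer-provided key, its SHA-256, algorithm, encryption scope
        nil, nil, nil, // leaseID, maxSize, appendPosition
        nil, nil, nil, nil, nil, // destination conditional headers
        nil, nil, nil, nil, // source conditional headers
        nil, nil) // requestID, copySourceAuthorization
    if err != nil {
        // a 412 here maps to MaxBlobSizeConditionNotMet or AppendPositionConditionNotMet
    }
    _ = resp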
-func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*AppendBlobAppendBlockFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*AppendBlobAppendBlockFromURLResponse), err -} - -// appendBlockFromURLPreparer prepares the AppendBlockFromURL request. 
-func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "appendblock") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-copy-source", sourceURL) - if sourceRange != nil { - req.Header.Set("x-ms-source-range", *sourceRange) - } - if sourceContentMD5 != nil { - req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) - } - if sourceContentcrc64 != nil { - req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) - } - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if maxSize != nil { - req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) - } - if appendPosition != nil { - req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if copySourceAuthorization != nil { - 
req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) - } - return req, nil -} - -// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request. -func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err -} - -// Create the Create Append Blob operation creates a new append blob. -// -// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting -// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, -// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the -// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. -// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the -// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this -// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. -// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and -// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the -// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the -// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified -// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, -// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and -// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is -// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. -// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not -// specified, encryption is performed with the root account encryption key. For more information, see Encryption at -// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be -// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the -// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key -// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption -// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default -// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince -// is specify this header value to operate only on a blob if it has been modified since the specified date/time. 
-// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is -// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on -// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value -// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -// blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is specifies -// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the -// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. -func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*AppendBlobCreateResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) - if err != nil { - return nil, err - } - return resp.(*AppendBlobCreateResponse), err -} - -// createPreparer prepares the Create request. 
-func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - req.Header.Set("x-ms-blob-type", "AppendBlob") - return req, nil -} - -// createResponder handles the response to the Create request. 
-func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err -} - -// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 -// version or later. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if -// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is -// specify this header value to operate only on a blob if it has been modified since the specified date/time. -// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is -// specify an ETag value to operate only on blobs without a matching value. appendPosition is optional conditional -// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will -// succeed only if the append position is equal to this number. If it is not, the request will fail with the -// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). -func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req) - if err != nil { - return nil, err - } - return resp.(*AppendBlobSealResponse), err -} - -// sealPreparer prepares the Seal request. 
-func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "seal") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if appendPosition != nil { - req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) - } - return req, nil -} - -// sealResponder handles the response to the Seal request. -func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &AppendBlobSealResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go deleted file mode 100644 index 6e3b9a207a4..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go +++ /dev/null @@ -1,2063 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// blobClient is the client for the Blob methods of the Azblob service. -type blobClient struct { - managementClient -} - -// newBlobClient creates an instance of the blobClient client. -func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { - return blobClient{newManagementClient(url, p)} -} - -// AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a -// destination blob with zero length and full metadata. -// -// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is -// the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
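A minimal caller-side sketch, assuming a parsed blob URL u (url.URL), a configured pipeline p, a ctx, and a copyID captured from an earlier Copy Blob response; in practice the package's exported wrappers construct this client for you. The implementation follows.

    client := newBlobClient(u, p)
    _, err := client.AbortCopyFromURL(ctx, copyID, nil, nil, nil) // timeout, leaseID, requestID left at defaults
    if err != nil {
        // a 409 typically means there is no longer a pending copy to abort
    }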
-func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobAbortCopyFromURLResponse), err -} - -// abortCopyFromURLPreparer prepares the AbortCopyFromURL request. -func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("copyid", copyID) - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "copy") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-copy-action", "abort") - return req, nil -} - -// abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. -func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusNoContent) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err -} - -// AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete -// operations -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative -// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration -// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob -// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor -// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. 
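A short sketch of the call that the implementation below serves, with hypothetical client and ctx values; only the lease duration is supplied, so the service generates the lease ID.

    duration := int32(15) // 15-60 seconds, or -1 for a lease that never expires
    resp, err := client.AcquireLease(ctx, nil, &duration, nil, nil, nil, nil, nil, nil, nil)
    if err == nil {
        _ = resp // the new lease ID comes back in the response headers
    }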
-func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobAcquireLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobAcquireLeaseResponse), err -} - -// acquireLeasePreparer prepares the AcquireLease request. -func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - req.URL.RawQuery = params.Encode() - if duration != nil { - req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) - } - if proposedLeaseID != nil { - req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "acquire") - return req, nil -} - -// acquireLeaseResponder handles the response to the AcquireLease request. -func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err -} - -// BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete -// operations -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should -// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the -// time remaining on the lease. If longer, the time remaining on the lease is used. 
A new lease will not be available -// before the break period has expired, but the lease may be held for longer than the break period. If this header does -// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an -// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has -// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is -// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when -// storage analytics logging is enabled. -func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobBreakLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobBreakLeaseResponse), err -} - -// breakLeasePreparer prepares the BreakLease request. -func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - req.URL.RawQuery = params.Encode() - if breakPeriod != nil { - req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "break") - return req, nil -} - -// breakLeaseResponder handles the response to the BreakLease request. 
-func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err -} - -// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete -// operations -// -// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string -// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See -// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate -// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobChangeLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobChangeLeaseResponse), err -} - -// changeLeasePreparer prepares the ChangeLease request. 
-func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "change") - return req, nil -} - -// changeLeaseResponder handles the response to the ChangeLease request. -func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err -} - -// CopyFromURL the Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a -// response until the copy is complete. -// -// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that -// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob -// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is -// expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated -// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or -// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with -// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. -// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the -// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not -// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a -// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. 
-// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL -// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation -// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be read from the copy -// source. blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is -// specifies the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the -// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. -// copySourceAuthorization is only Bearer type is supported. Credentials should be a valid OAuth access token to copy -// source. -func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (*BlobCopyFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, sourceContentMD5, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, copySourceAuthorization) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobCopyFromURLResponse), err -} - -// copyFromURLPreparer prepares the CopyFromURL request. 
-func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if tier != AccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-copy-source", copySource) - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if sourceContentMD5 != nil { - req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - if copySourceAuthorization != nil { - req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) - } - req.Header.Set("x-ms-requires-sync", "true") - return req, nil -} - -// copyFromURLResponder handles the response to the CopyFromURL request. 
-func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobCopyFromURLResponse{rawResponse: resp.Response()}, err -} - -// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated -// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or -// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with -// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use -// to encrypt the data provided in the request. If not specified, encryption is performed with the root account -// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the -// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. -// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is -// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version -// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the -// request. If not specified, encryption is performed with the default account encryption scope. For more information, -// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. 
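Sketch of a typical invocation of the implementation below (client and ctx are assumed placeholders): one metadata pair is attached to the snapshot and everything else is left at its default.

    resp, err := client.CreateSnapshot(ctx, nil,
        map[string]string{"origin": "scheduled"}, // metadata names must be valid C# identifiers
        nil, nil, EncryptionAlgorithmNone, nil, // customer-provided encryption settings
        nil, nil, nil, nil, nil, // conditional headers
        nil, nil) // leaseID, requestID
    if err == nil {
        _ = resp // the response identifies the new snapshot by its opaque DateTime value
    }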
-func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobCreateSnapshotResponse), err -} - -// createSnapshotPreparer prepares the CreateSnapshot request. -func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "snapshot") - req.URL.RawQuery = params.Encode() - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// createSnapshotResponder handles the response to the CreateSnapshot request. 
-func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err -} - -// Delete if the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently -// removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is -// deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob -// or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] -// (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently -// removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it -// is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which -// blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. -// All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 -// (ResourceNotFound). -// -// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to -// retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, -// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one -// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's -// snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has -// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is -// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when -// storage analytics logging is enabled. blobDeleteType is optional. Only possible value is 'permanent', which -// specifies to permanently delete a blob if blob soft delete is enabled. 
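Sketch of the call served by the implementation below (client and ctx assumed), deleting the base blob together with its snapshots; DeleteSnapshotsOptionInclude is the "include" value in this file's generated enum naming scheme, and BlobDeleteNone leaves the soft-delete policy in force.

    resp, err := client.Delete(ctx, nil, nil, nil, nil, // snapshot, versionID, timeout, leaseID
        DeleteSnapshotsOptionInclude, // "include": the base blob and all of its snapshots
        nil, nil, nil, nil, nil, // conditional headers
        nil, BlobDeleteNone) // requestID; no permanent delete requested
    if err != nil {
        // 404 ResourceNotFound if the blob was already soft-deleted
    }
    _ = resp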
-func (client blobClient) Delete(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (*BlobDeleteResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.deletePreparer(snapshot, versionID, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobDeleteType) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobDeleteResponse), err -} - -// deletePreparer prepares the Delete request. -func (client blobClient) deletePreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (pipeline.Request, error) { - req, err := pipeline.NewRequest("DELETE", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - if blobDeleteType != BlobDeleteNone { - params.Set("deletetype", string(blobDeleteType)) - } - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if deleteSnapshots != DeleteSnapshotsOptionNone { - req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// deleteResponder handles the response to the Delete request. -func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobDeleteResponse{rawResponse: resp.Response()}, err -} - -// DeleteImmutabilityPolicy the Delete Immutability Policy operation deletes the immutability policy on the blob -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. 
requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blobClient) DeleteImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string) (*BlobDeleteImmutabilityPolicyResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.deleteImmutabilityPolicyPreparer(timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteImmutabilityPolicyResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobDeleteImmutabilityPolicyResponse), err -} - -// deleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. -func (client blobClient) deleteImmutabilityPolicyPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("DELETE", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "immutabilityPolicies") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// deleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. -func (client blobClient) deleteImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobDeleteImmutabilityPolicyResponse{rawResponse: resp.Response()}, err -} - -// Download the Download operation reads or downloads a blob from the system, including its metadata and properties. -// You can also call Download to read a snapshot. -// -// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to -// retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, -// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified -// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for -// the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and -// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less -// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data -// provided in the request. 
If not specified, encryption is performed with the root account encryption key. For more -// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the -// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the -// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided -// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. -func (client blobClient) Download(ctx context.Context, snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*downloadResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.downloadPreparer(snapshot, versionID, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) - if err != nil { - return nil, err - } - return resp.(*downloadResponse), err -} - -// downloadPreparer prepares the Download request. 
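// Everything range-related travels as request headers: x-ms-range selects the byte
// window, while x-ms-range-get-content-md5 and x-ms-range-get-content-crc64 ask the
// service to hash a range of at most 4 MB, as described in the Download doc comment
// above.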
-func (client blobClient) downloadPreparer(snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if rangeParameter != nil { - req.Header.Set("x-ms-range", *rangeParameter) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if rangeGetContentMD5 != nil { - req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) - } - if rangeGetContentCRC64 != nil { - req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64)) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// downloadResponder handles the response to the Download request. -func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) - if resp == nil { - return nil, err - } - return &downloadResponse{rawResponse: resp.Response()}, err -} - -// GetAccountInfo returns the sku name and account kind -func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { - req, err := client.getAccountInfoPreparer() - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobGetAccountInfoResponse), err -} - -// getAccountInfoPreparer prepares the GetAccountInfo request. 
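// It issues a plain GET with restype=account and comp=properties on the query
// string; the SKU name and account kind are reported by the service in response
// headers.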
-func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("restype", "account") - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - return req, nil -} - -// getAccountInfoResponder handles the response to the GetAccountInfo request. -func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err -} - -// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system -// properties for the blob. It does not return the content of the blob. -// -// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to -// retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, -// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the -// data provided in the request. If not specified, encryption is performed with the root account encryption key. For -// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the -// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the -// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided -// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. 
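// A hypothetical usage sketch (assumes client and ctx as in the Delete sketch above;
// every optional argument is left unset, with EncryptionAlgorithmNone as the zero
// enum value):
//
//	props, err := client.GetProperties(ctx, nil, nil, nil, nil, nil, nil,
//		EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil)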
-func (client blobClient) GetProperties(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobGetPropertiesResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getPropertiesPreparer(snapshot, versionID, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobGetPropertiesResponse), err -} - -// getPropertiesPreparer prepares the GetProperties request. -func (client blobClient) getPropertiesPreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("HEAD", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getPropertiesResponder handles the response to the GetProperties request. 
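// Because the request is a HEAD, all returned information is carried in response
// headers; the body is simply drained and closed.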
-func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err -} - -// GetTags the Get Tags operation enables users to get the tags associated with a blob. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. snapshot is the -// snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more -// information on working with blob snapshots, see Creating -// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, -// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. ifTags is specify a -// SQL where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation -// only succeeds if the resource's lease is active and matches this ID. -func (client blobClient) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (*BlobTags, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getTagsPreparer(timeout, requestID, snapshot, versionID, ifTags, leaseID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getTagsResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobTags), err -} - -// getTagsPreparer prepares the GetTags request. -func (client blobClient) getTagsPreparer(timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - params.Set("comp", "tags") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - return req, nil -} - -// getTagsResponder handles the response to the GetTags request. 
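// It expects HTTP 200, reads the XML body, strips any byte-order mark, and
// unmarshals the payload into BlobTags.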
-func (client blobClient) getTagsResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &BlobTags{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// Query the Query operation enables users to select/project on blob data by providing simple query expressions. -// -// // queryPreparer prepares the Query request. -// func (client blobClient) queryPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *string, ifNoneMatch *string, ifTags *string, requestID *string) (pipeline.Request, error) { -// req, err := pipeline.NewRequest("POST", client.url, nil) -// if err != nil { -// return req, pipeline.NewError(err, "failed to create request") -// } -// params := req.URL.Query() -// if snapshot != nil && len(*snapshot) > 0 { -// params.Set("snapshot", *snapshot) -// } -// if timeout != nil { -// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) -// } -// params.Set("comp", "query") -// req.URL.RawQuery = params.Encode() -// if leaseID != nil { -// req.Header.Set("x-ms-lease-id", *leaseID) -// } -// if encryptionKey != nil { -// req.Header.Set("x-ms-encryption-key", *encryptionKey) -// } -// if encryptionKeySha256 != nil { -// req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) -// } -// if encryptionAlgorithm != EncryptionAlgorithmNone { -// req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) -// } -// if ifModifiedSince != nil { -// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) -// } -// if ifUnmodifiedSince != nil { -// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) -// } -// if ifMatch != nil { -// req.Header.Set("If-Match", *ifMatch) -// } -// if ifNoneMatch != nil { -// req.Header.Set("If-None-Match", *ifNoneMatch) -// } -// if ifTags != nil { -// req.Header.Set("x-ms-if-tags", *ifTags) -// } -// req.Header.Set("x-ms-version", ServiceVersion) -// if requestID != nil { -// req.Header.Set("x-ms-client-request-id", *requestID) -// } -// b, err := xml.Marshal(queryRequest) -// if err != nil { -// return req, pipeline.NewError(err, "failed to marshal request body") -// } -// req.Header.Set("Content-Type", "application/xml") -// err = req.SetBody(bytes.NewReader(b)) -// if err != nil { -// return req, pipeline.NewError(err, "failed to set request body") -// } -// return req, nil -// } -// -// // queryResponder handles the response to the Query request. -// func (client blobClient) queryResponder(resp pipeline.Response) (pipeline.Response, error) { -// err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) -// if resp == nil { -// return nil, err -// } -// return &QueryResponse{rawResponse: resp.Response()}, err -// } - -// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete -// operations -// -// leaseID is specifies the current lease ID on the resource. 
timeout is the timeout parameter is expressed in seconds. -// For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate -// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobReleaseLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobReleaseLeaseResponse), err -} - -// releaseLeasePreparer prepares the ReleaseLease request. -func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "release") - return req, nil -} - -// releaseLeaseResponder handles the response to the ReleaseLease request. 
-func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err -} - -// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete -// operations -// -// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. -// For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate -// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobRenewLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobRenewLeaseResponse), err -} - -// renewLeasePreparer prepares the RenewLease request. 
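// It mirrors releaseLeasePreparer above: a PUT with comp=lease and the current lease
// ID, differing only in the x-ms-lease-action header ("renew" rather than "release").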
-func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "renew") - return req, nil -} - -// renewLeaseResponder handles the response to the RenewLease request. -func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err -} - -// SetExpiry sets the time a blob will expire and be deleted. -// -// expiryOptions is required. Indicates mode of the expiry time timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. expiresOn is the -// time to set the blob to expiry -func (client blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (*BlobSetExpiryResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setExpiryPreparer(expiryOptions, timeout, requestID, expiresOn) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setExpiryResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetExpiryResponse), err -} - -// setExpiryPreparer prepares the SetExpiry request. 
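// The expiry mode is sent in the x-ms-expiry-option header and the optional
// timestamp in x-ms-expiry-time, with comp=expiry on the query string.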
-func (client blobClient) setExpiryPreparer(expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "expiry") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-expiry-option", string(expiryOptions)) - if expiresOn != nil { - req.Header.Set("x-ms-expiry-time", *expiresOn) - } - return req, nil -} - -// setExpiryResponder handles the response to the SetExpiry request. -func (client blobClient) setExpiryResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetExpiryResponse{rawResponse: resp.Response()}, err -} - -// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, -// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's -// content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 -// is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual -// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If -// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. -// Set the blob's content language. If specified, this property is stored with the blob and returned with a read -// request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL -// where clause on blob tags to operate only on blobs with a matching value. blobContentDisposition is optional. Sets -// the blob's Content-Disposition header. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
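// A hypothetical usage sketch (assumes client and ctx as above; only the content
// type is set and every other header and precondition is left nil):
//
//	contentType := "application/json"
//	_, err := client.SetHTTPHeaders(ctx, nil, nil, &contentType, nil, nil, nil, nil,
//		nil, nil, nil, nil, nil, nil, nil)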
-func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobContentDisposition, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetHTTPHeadersResponse), err -} - -// setHTTPHeadersPreparer prepares the SetHTTPHeaders request. -func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. 
-func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err -} - -// SetImmutabilityPolicy the Set Immutability Policy operation sets the immutability policy on the blob -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. ifUnmodifiedSince -// is specify this header value to operate only on a blob if it has not been modified since the specified date/time. -// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. -// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. -func (client blobClient) SetImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (*BlobSetImmutabilityPolicyResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setImmutabilityPolicyPreparer(timeout, requestID, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setImmutabilityPolicyResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetImmutabilityPolicyResponse), err -} - -// setImmutabilityPolicyPreparer prepares the SetImmutabilityPolicy request. -func (client blobClient) setImmutabilityPolicyPreparer(timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "immutabilityPolicies") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - return req, nil -} - -// setImmutabilityPolicyResponder handles the response to the SetImmutabilityPolicy request. 
-func (client blobClient) setImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetImmutabilityPolicyResponse{rawResponse: resp.Response()}, err -} - -// SetLegalHold the Set Legal Hold operation sets a legal hold on the blob. -// -// legalHold is specified if a legal hold should be set on the blob. timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blobClient) SetLegalHold(ctx context.Context, legalHold bool, timeout *int32, requestID *string) (*BlobSetLegalHoldResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setLegalHoldPreparer(legalHold, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setLegalHoldResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetLegalHoldResponse), err -} - -// setLegalHoldPreparer prepares the SetLegalHold request. -func (client blobClient) setLegalHoldPreparer(legalHold bool, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "legalhold") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) - return req, nil -} - -// setLegalHoldResponder handles the response to the SetLegalHold request. -func (client blobClient) setLegalHoldResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetLegalHoldResponse{rawResponse: resp.Response()}, err -} - -// SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more -// name-value pairs -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated -// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or -// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with -// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing -// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the -// resource's lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to -// encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption -// key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 -// hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. -// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is -// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version -// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the -// request. If not specified, encryption is performed with the default account encryption scope. For more information, -// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. -func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobSetMetadataResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setMetadataPreparer(timeout, metadata, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetMetadataResponse), err -} - -// setMetadataPreparer prepares the SetMetadata request. 
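// Each metadata pair is transmitted as its own x-ms-meta-<name> request header.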
-func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "metadata") - req.URL.RawQuery = params.Encode() - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// setMetadataResponder handles the response to the SetMetadata request. -func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err -} - -// SetTags the Set Tags operation enables users to set tags on a blob. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. versionID is the version id parameter is an opaque DateTime value that, -// when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. -// transactionalContentMD5 is specify the transactional md5 for the body, to be validated by the service. -// transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated by the service. requestID -// is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when -// storage analytics logging is enabled. ifTags is specify a SQL where clause on blob tags to operate only on blobs -// with a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and -// matches this ID. 
tags is blob tags -func (client blobClient) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (*BlobSetTagsResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setTagsPreparer(timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, leaseID, tags) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTagsResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetTagsResponse), err -} - -// setTagsPreparer prepares the SetTags request. -func (client blobClient) setTagsPreparer(timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - params.Set("comp", "tags") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if transactionalContentCrc64 != nil { - req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) - } - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - b, err := xml.Marshal(tags) - if err != nil { - return req, pipeline.NewError(err, "failed to marshal request body") - } - req.Header.Set("Content-Type", "application/xml") - err = req.SetBody(bytes.NewReader(b)) - if err != nil { - return req, pipeline.NewError(err, "failed to set request body") - } - return req, nil -} - -// setTagsResponder handles the response to the SetTags request. -func (client blobClient) setTagsResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusNoContent) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetTagsResponse{rawResponse: resp.Response()}, err -} - -// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage -// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier -// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive -// storage type. This operation does not update the blob's ETag. -// -// tier is indicates the tier to be set on the blob. snapshot is the snapshot parameter is an opaque DateTime value -// that, when present, specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, -// see Creating -// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, -// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to -// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that -// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation -// only succeeds if the resource's lease is active and matches this ID. ifTags is specify a SQL where clause on blob -// tags to operate only on blobs with a matching value. -func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (*BlobSetTierResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setTierPreparer(tier, snapshot, versionID, timeout, rehydratePriority, requestID, leaseID, ifTags) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobSetTierResponse), err -} - -// setTierPreparer prepares the SetTier request. -func (client blobClient) setTierPreparer(tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if versionID != nil && len(*versionID) > 0 { - params.Set("versionid", *versionID) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "tier") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-access-tier", string(tier)) - if rehydratePriority != RehydratePriorityNone { - req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - return req, nil -} - -// setTierResponder handles the response to the SetTier request. 
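// It accepts both 200 (the tier change took effect immediately) and 202 (the change
// was accepted but is still pending, e.g. an archive rehydration) before draining
// and closing the body.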
-func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobSetTierResponse{rawResponse: resp.Response()}, err -} - -// StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. -// -// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that -// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob -// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is -// expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated -// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or -// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with -// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. -// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob. -// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the -// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not -// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a -// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// sourceIfTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL -// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation -// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the -// sealed state of the destination blob. Service version 2019-12-12 and newer. immutabilityPolicyExpiry is specifies -// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the -// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. 
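Most of the conditions enumerated in the doc comment above are optional, and a typical call to the StartCopyFromURL implementation that follows passes nil for all of them. A sketch under the same in-package assumptions (srcURL must be URL-encoded and publicly readable or pre-authenticated):

func startCopyExample(ctx context.Context, dstURL url.URL, pl pipeline.Pipeline, srcURL string) error {
	client := newBlobClient(dstURL, pl) // assumed generated constructor
	_, err := client.StartCopyFromURL(ctx, srcURL,
		nil, nil, AccessTierNone, RehydratePriorityNone, // timeout, metadata, tier, rehydrate priority
		nil, nil, nil, nil, nil, // source if-modified/unmodified/match/none-match/tags
		nil, nil, nil, nil, nil, // destination if-modified/unmodified/match/none-match/tags
		nil, nil, nil, nil, nil, // lease ID, request ID, blob tags, seal blob, immutability expiry
		BlobImmutabilityPolicyModeNone, nil) // immutability mode, legal hold
	return err
}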
-func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlobStartCopyFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, blobTagsString, sealBlob, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobStartCopyFromURLResponse), err -} - -// startCopyFromURLPreparer prepares the StartCopyFromURL request. -func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if tier != AccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if rehydratePriority != RehydratePriorityNone { - req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - if sourceIfTags != nil { - req.Header.Set("x-ms-source-if-tags", *sourceIfTags) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", 
(*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-copy-source", copySource) - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if sealBlob != nil { - req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob)) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - return req, nil -} - -// startCopyFromURLResponder handles the response to the StartCopyFromURL request. -func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err -} - -// Undelete undelete a blob that was previously soft deleted -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.undeletePreparer(timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlobUndeleteResponse), err -} - -// undeletePreparer prepares the Undelete request. -func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "undelete") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// undeleteResponder handles the response to the Undelete request. 
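Undelete is the simplest of these operations; under the same assumptions the sketch collapses to:

func undeleteExample(ctx context.Context, blobURL url.URL, pl pipeline.Pipeline) error {
	client := newBlobClient(blobURL, pl) // assumed generated constructor
	_, err := client.Undelete(ctx, nil, nil) // default timeout, no client request ID
	return err
}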
-func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlobUndeleteResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go deleted file mode 100644 index 31067ed64e5..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go +++ /dev/null @@ -1,848 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// blockBlobClient is the client for the BlockBlob methods of the Azblob service. -type blockBlobClient struct { - managementClient -} - -// newBlockBlobClient creates an instance of the blockBlobClient client. -func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { - return blockBlobClient{newManagementClient(url, p)} -} - -// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the -// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior -// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, -// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from -// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the -// block, whichever list it may belong to. -// -// blocks is blob Blocks. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, -// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's -// content type. If specified, this property is stored with the blob and returned with a read request. -// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the -// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If -// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An -// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were -// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be -// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated -// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no -// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination -// blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, -// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names -// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for -// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches -// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. -// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is -// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage -// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the -// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key -// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is -// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to -// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account -// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional. -// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate -// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob -// operations. immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. -// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a -// legal hold should be set on the blob. 
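This is the commit half of the stage-then-commit flow described above. A hedged sketch of driving the CommitBlockList implementation that follows; the Latest field of BlockLookupList is assumed to select the most recently staged copy of each block ID:

func commitBlocksExample(ctx context.Context, blobURL url.URL, pl pipeline.Pipeline, ids []string) error {
	client := newBlockBlobClient(blobURL, pl)
	blocks := BlockLookupList{Latest: ids} // assumed field: commit whichever copy of each block was staged last
	_, err := client.CommitBlockList(ctx, blocks,
		nil, nil, nil, nil, nil, nil, nil, nil, // timeout, cache control, content type/encoding/language/MD5, transactional MD5/CRC64
		nil, nil, nil, nil, nil, // metadata, lease ID, content disposition, encryption key, key SHA-256
		EncryptionAlgorithmNone, nil, AccessTierNone, // encryption algorithm, encryption scope, tier
		nil, nil, nil, nil, nil, // if-modified/unmodified/match/none-match/tags
		nil, nil, nil, // request ID, blob tags, immutability expiry
		BlobImmutabilityPolicyModeNone, nil) // immutability mode, legal hold
	return err
}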
-func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobCommitBlockListResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockBlobCommitBlockListResponse), err -} - -// commitBlockListPreparer prepares the CommitBlockList request. 
-func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "blocklist") - req.URL.RawQuery = params.Encode() - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if transactionalContentCrc64 != nil { - req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if tier != AccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", 
(*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - b, err := xml.Marshal(blocks) - if err != nil { - return req, pipeline.NewError(err, "failed to marshal request body") - } - req.Header.Set("Content-Type", "application/xml") - err = req.SetBody(bytes.NewReader(b)) - if err != nil { - return req, pipeline.NewError(err, "failed to set request body") - } - return req, nil -} - -// commitBlockListResponder handles the response to the CommitBlockList request. -func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err -} - -// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block -// blob -// -// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists -// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob -// snapshot to retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with -// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is -// recorded in the analytics logs when storage analytics logging is enabled. -func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockList), err -} - -// getBlockListPreparer prepares the GetBlockList request. 
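Reading block state back is symmetric. A sketch, with CommittedBlocks assumed to be the result field carrying each committed block's name and size:

func listBlocksExample(ctx context.Context, blobURL url.URL, pl pipeline.Pipeline) error {
	client := newBlockBlobClient(blobURL, pl)
	// Ask for both committed and uncommitted blocks; snapshot, timeout, lease ID, tag filter and request ID are left nil.
	blockList, err := client.GetBlockList(ctx, BlockListAll, nil, nil, nil, nil, nil)
	if err != nil {
		return err
	}
	_ = blockList.CommittedBlocks // assumed result field; UncommittedBlocks is its counterpart
	return nil
}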
-func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - params.Set("blocklisttype", string(listType)) - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "blocklist") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getBlockListResponder handles the response to the GetBlockList request. -func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &BlockList{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// PutBlobFromURL the Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from -// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not supported with -// Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform -// partial updates to a block blob’s contents using a source URL, use the Put Block from URL API in conjunction with -// Put Block List. -// -// contentLength is the length of the request. copySource is specifies the name of the source page blob snapshot. This -// value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it -// would appear in a request URI. The source blob must either be public or must be authenticated via a shared access -// signature. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to -// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property -// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content -// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage -// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with -// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, -// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets -// the blob's cache control. 
If specified, this property is stored with the blob and returned with a read request. -// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are -// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more -// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not -// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the -// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. -// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies -// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed -// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. -// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key -// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the -// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is -// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data -// provided in the request. If not specified, encryption is performed with the default account encryption scope. For -// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set -// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since -// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been -// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching -// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a -// SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is specify this -// header value to operate only on a blob if it has been modified since the specified date/time. -// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. -// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfTags is -// specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be -// read from the copy source. blobTagsString is optional. Used to set blob tags in various blob operations. -// copySourceBlobProperties is optional, default is true. Indicates if properties from the source blob should be -// copied. copySourceAuthorization is only Bearer type is supported. 
Credentials should be a valid OAuth access token -// to copy source. -func (client blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (*BlockBlobPutBlobFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.putBlobFromURLPreparer(contentLength, copySource, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, requestID, sourceContentMD5, blobTagsString, copySourceBlobProperties, copySourceAuthorization) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.putBlobFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockBlobPutBlobFromURLResponse), err -} - -// putBlobFromURLPreparer prepares the PutBlobFromURL request. 
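Because Put Blob from URL reads its payload from the source URL rather than the request body, the content length of the request itself is zero. A sketch under the same assumptions:

func putBlobFromURLExample(ctx context.Context, dstURL url.URL, pl pipeline.Pipeline, srcURL string) error {
	client := newBlockBlobClient(dstURL, pl)
	_, err := client.PutBlobFromURL(ctx, 0, srcURL, // zero content length: the payload comes from srcURL, not the request body
		nil, nil, nil, nil, nil, nil, nil, // timeout, transactional MD5, content type/encoding/language/MD5, cache control
		nil, nil, nil, nil, nil, // metadata, lease ID, content disposition, encryption key, key SHA-256
		EncryptionAlgorithmNone, nil, AccessTierNone, // encryption algorithm, encryption scope, tier
		nil, nil, nil, nil, nil, // destination if-modified/unmodified/match/none-match/tags
		nil, nil, nil, nil, nil, // source if-modified/unmodified/match/none-match/tags
		nil, nil, nil, nil, nil) // request ID, source MD5, blob tags, copy source properties, copy source authorization
	return err
}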
-func (client blockBlobClient) putBlobFromURLPreparer(contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if tier != AccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - 
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - if sourceIfTags != nil { - req.Header.Set("x-ms-source-if-tags", *sourceIfTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if sourceContentMD5 != nil { - req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - req.Header.Set("x-ms-copy-source", copySource) - if copySourceBlobProperties != nil { - req.Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*copySourceBlobProperties)) - } - if copySourceAuthorization != nil { - req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) - } - req.Header.Set("x-ms-blob-type", "BlockBlob") - return req, nil -} - -// putBlobFromURLResponder handles the response to the PutBlobFromURL request. -func (client blockBlobClient) putBlobFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlockBlobPutBlobFromURLResponse{rawResponse: resp.Response()}, err -} - -// StageBlock the Stage Block operation creates a new block to be committed as part of a blob -// -// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or -// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the -// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon -// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the -// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the -// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the -// data provided in the request. If not specified, encryption is performed with the root account encryption key. For -// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the -// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the -// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided -// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies -// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is -// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage -// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. 
-func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockBlobStageBlockResponse), err -} - -// stageBlockPreparer prepares the StageBlock request. -func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("blockid", blockID) - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "block") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if transactionalContentCrc64 != nil { - req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// stageBlockResponder handles the response to the StageBlock request. 
-func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err -} - -// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents -// are read from a URL. -// -// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or -// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the -// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. -// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the -// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the -// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For -// more information, see Setting -// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt -// the data provided in the request. If not specified, encryption is performed with the root account encryption key. -// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of -// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is -// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be -// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. -// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, -// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for -// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and -// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been -// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate -// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded -// in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only Bearer type is -// supported. Credentials should be a valid OAuth access token to copy source. 
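The URL-sourced variant stages a byte range of a remote blob instead of a request body, so the content length is again zero; the x-ms-source-range header uses the usual bytes=start-end form. A sketch:

func stageBlockFromURLExample(ctx context.Context, blobURL url.URL, pl pipeline.Pipeline, srcURL string) error {
	client := newBlockBlobClient(blobURL, pl)
	id := base64.StdEncoding.EncodeToString([]byte("block-00000"))
	srcRange := "bytes=0-1048575" // first MiB of the source blob
	_, err := client.StageBlockFromURL(ctx, id, 0, srcURL, &srcRange,
		nil, nil, nil, // source MD5, source CRC64, timeout
		nil, nil, EncryptionAlgorithmNone, nil, nil, // encryption key, key SHA-256, algorithm, scope, lease ID
		nil, nil, nil, nil, nil, nil) // source if-modified/unmodified/match/none-match, request ID, copy source authorization
	return err
}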
-func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*BlockBlobStageBlockFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockBlobStageBlockFromURLResponse), err -} - -// stageBlockFromURLPreparer prepares the StageBlockFromURL request. -func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("blockid", blockID) - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "block") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Header.Set("x-ms-copy-source", sourceURL) - if sourceRange != nil { - req.Header.Set("x-ms-source-range", *sourceRange) - } - if sourceContentMD5 != nil { - req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) - } - if sourceContentcrc64 != nil { - req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - 
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if copySourceAuthorization != nil { - req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) - } - return req, nil -} - -// stageBlockFromURLResponder handles the response to the StageBlockFromURL request. -func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err -} - -// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block -// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of -// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a -// block blob, use the Put Block List operation. -// -// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an -// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting -// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to -// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property -// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content -// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage -// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with -// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, -// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets -// the blob's cache control. If specified, this property is stored with the blob and returned with a read request. -// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are -// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more -// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not -// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the -// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. -// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. 
Specifies -// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed -// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. -// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key -// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the -// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is -// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data -// provided in the request. If not specified, encryption is performed with the default account encryption scope. For -// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set -// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since -// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been -// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching -// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a -// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. -// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. -// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a -// legal hold should be set on the blob. 
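Upload is the single-shot path: one PUT carries the whole body, and any existing content and metadata are overwritten, as the doc comment above spells out. A sketch under the same assumptions:

func uploadExample(ctx context.Context, blobURL url.URL, pl pipeline.Pipeline, data []byte) error {
	client := newBlockBlobClient(blobURL, pl)
	_, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)),
		nil, nil, nil, nil, nil, nil, nil, // timeout, transactional MD5, content type/encoding/language/MD5, cache control
		nil, nil, nil, nil, nil, // metadata, lease ID, content disposition, encryption key, key SHA-256
		EncryptionAlgorithmNone, nil, AccessTierNone, // encryption algorithm, encryption scope, tier
		nil, nil, nil, nil, nil, // if-modified/unmodified/match/none-match/tags
		nil, nil, nil, // request ID, blob tags, immutability expiry
		BlobImmutabilityPolicyModeNone, nil) // immutability mode, legal hold
	return err
}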
-func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobUploadResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) - if err != nil { - return nil, err - } - return resp.(*BlockBlobUploadResponse), err -} - -// uploadPreparer prepares the Upload request. 
-func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if tier != AccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) - } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - 
req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - req.Header.Set("x-ms-blob-type", "BlockBlob") - return req, nil -} - -// uploadResponder handles the response to the Upload request. -func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go deleted file mode 100644 index 0db347e3459..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go +++ /dev/null @@ -1,38 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/azure-pipeline-go/pipeline" - "net/url" -) - -const ( - // ServiceVersion specifies the version of the operations used in this package. - ServiceVersion = "2020-10-02" -) - -// managementClient is the base client for Azblob. -type managementClient struct { - url url.URL - p pipeline.Pipeline -} - -// newManagementClient creates an instance of the managementClient client. -func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { - return managementClient{ - url: url, - p: p, - } -} - -// URL returns a copy of the URL for this client. -func (mc managementClient) URL() url.URL { - return mc.url -} - -// Pipeline returns the pipeline for this client. -func (mc managementClient) Pipeline() pipeline.Pipeline { - return mc.p -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go deleted file mode 100644 index 2e2f176e548..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go +++ /dev/null @@ -1,1232 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// containerClient is the client for the Container methods of the Azblob service. -type containerClient struct { - managementClient -} - -// newContainerClient creates an instance of the containerClient client. -func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { - return containerClient{newManagementClient(url, p)} -} - -// AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be -// 15 to 60 seconds, or can be infinite -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative -// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration -// cannot be changed using renew or change. 
proposedLeaseID is proposed lease ID, in a GUID string format. The Blob -// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor -// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerAcquireLeaseResponse), err -} - -// acquireLeasePreparer prepares the AcquireLease request. -func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - if duration != nil { - req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) - } - if proposedLeaseID != nil { - req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "acquire") - return req, nil -} - -// acquireLeaseResponder handles the response to the AcquireLease request. -func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err -} - -// BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// -// timeout is the timeout parameter is expressed in seconds. 
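On the wire, acquireLeasePreparer above reduces to a bare PUT against ?comp=lease&restype=container with the lease headers set. A stdlib-only sketch of the same request shape; newAcquireLeaseRequest and the account URL are placeholders, and real callers go through the package's authenticated pipeline instead:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// newAcquireLeaseRequest mirrors what acquireLeasePreparer assembles:
// PUT {container}?comp=lease&restype=container with lease headers.
func newAcquireLeaseRequest(containerURL string, durationSeconds int, proposedID string) (*http.Request, error) {
	u, err := url.Parse(containerURL)
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("comp", "lease")
	q.Set("restype", "container")
	u.RawQuery = q.Encode()

	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("x-ms-lease-action", "acquire")
	// -1 requests an infinite lease; otherwise 15 to 60 seconds.
	req.Header.Set("x-ms-lease-duration", strconv.Itoa(durationSeconds))
	if proposedID != "" {
		req.Header.Set("x-ms-proposed-lease-id", proposedID) // must be a GUID
	}
	req.Header.Set("x-ms-version", "2020-10-02")
	return req, nil
}

func main() {
	req, _ := newAcquireLeaseRequest("https://account.blob.core.windows.net/mycontainer", 30, "")
	fmt.Println(req.Method, req.URL.String())
}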
For more information, see Setting -// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should -// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the -// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available -// before the break period has expired, but the lease may be held for longer than the break period. If this header does -// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an -// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has -// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque -// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerBreakLeaseResponse), err -} - -// breakLeasePreparer prepares the BreakLease request. -func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - if breakPeriod != nil { - req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "break") - return req, nil -} - -// breakLeaseResponder handles the response to the BreakLease request. 
-func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err -} - -// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be -// 15 to 60 seconds, or can be infinite -// -// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string -// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See -// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. -func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerChangeLeaseResponse), err -} - -// changeLeasePreparer prepares the ChangeLease request. -func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "change") - return req, nil -} - -// changeLeaseResponder handles the response to the ChangeLease request. 
-func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err -} - -// Create creates a new container under the specified account. If the container with the same name already exists, the -// operation fails -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated -// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or -// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with -// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be -// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on -// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and -// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the -// container. -func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerCreateResponse), err -} - -// createPreparer prepares the Create request. 
-func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if access != PublicAccessNone { - req.Header.Set("x-ms-blob-public-access", string(access)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if defaultEncryptionScope != nil { - req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope) - } - if preventEncryptionScopeOverride != nil { - req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride)) - } - return req, nil -} - -// createResponder handles the response to the Create request. -func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerCreateResponse{rawResponse: resp.Response()}, err -} - -// Delete operation marks the specified container for deletion. The container and any blobs contained within it are -// later deleted during garbage collection -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque -// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerDeleteResponse), err -} - -// deletePreparer prepares the Delete request. 
-func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("DELETE", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// deleteResponder handles the response to the Delete request. -func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerDeleteResponse{rawResponse: resp.Response()}, err -} - -// GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data -// may be accessed publicly. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) - if err != nil { - return nil, err - } - return resp.(*SignedIdentifiers), err -} - -// getAccessPolicyPreparer prepares the GetAccessPolicy request. 
-func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "acl") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getAccessPolicyResponder handles the response to the GetAccessPolicy request. -func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &SignedIdentifiers{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetAccountInfo returns the sku name and account kind -func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { - req, err := client.getAccountInfoPreparer() - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerGetAccountInfoResponse), err -} - -// getAccountInfoPreparer prepares the GetAccountInfo request. -func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("restype", "account") - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - return req, nil -} - -// getAccountInfoResponder handles the response to the GetAccountInfo request. -func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err -} - -// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned -// does not include the container's list of blobs -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
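getAccessPolicyResponder above strips a UTF-8 byte-order mark before unmarshalling the ACL XML. A self-contained sketch of that parse; the lowercase signedIdentifiers types are local stand-ins shaped after the container ACL schema, not the package's own exported types:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

// Local stand-ins for the SignedIdentifiers types, shaped after the
// Get/Set Container ACL XML schema.
type accessPolicy struct {
	Start      string `xml:"Start"`
	Expiry     string `xml:"Expiry"`
	Permission string `xml:"Permission"`
}

type signedIdentifier struct {
	ID           string       `xml:"Id"`
	AccessPolicy accessPolicy `xml:"AccessPolicy"`
}

type signedIdentifiers struct {
	XMLName xml.Name           `xml:"SignedIdentifiers"`
	Items   []signedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	body := []byte("\xef\xbb\xbf<SignedIdentifiers><SignedIdentifier><Id>policy-1</Id>" +
		"<AccessPolicy><Permission>rl</Permission></AccessPolicy></SignedIdentifier></SignedIdentifiers>")
	// The responder removes a UTF-8 BOM before unmarshalling.
	body = bytes.TrimPrefix(body, []byte("\xef\xbb\xbf"))
	var ids signedIdentifiers
	if err := xml.Unmarshal(body, &ids); err != nil {
		panic(err)
	}
	fmt.Printf("%d stored access policies, first id %q\n", len(ids.Items), ids.Items[0].ID)
}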
-func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerGetPropertiesResponse), err -} - -// getPropertiesPreparer prepares the GetProperties request. -func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getPropertiesResponder handles the response to the GetProperties request. -func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err -} - -// ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container -// -// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a -// string value that identifies the portion of the list of containers to be returned with the next listing operation. -// The operation returns the NextMarker value within the response body if the listing operation did not return all -// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the -// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the -// client. maxresults is specifies the maximum number of containers to return. If the request does not specify -// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the -// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the -// remainder of the results. For this reason, it is possible that the service will return fewer results than specified -// by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to -// include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
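The marker/NextMarker contract described above is a standard continuation-token loop: pass an empty marker first, then echo back NextMarker until it comes back empty. A sketch of that loop; listPage is a hypothetical stand-in for a ListBlobFlatSegment call and fakes pagination over a local slice:

package main

import "fmt"

// page models one slice of results plus the continuation token the service
// returns; NextMarker is empty once the listing is exhausted.
type page struct {
	Names      []string
	NextMarker string
}

// listPage is a hypothetical stand-in for ListBlobFlatSegment: it returns one
// page of up to maxResults names starting at marker.
func listPage(all []string, marker string, maxResults int) page {
	start := 0
	if marker != "" {
		fmt.Sscanf(marker, "%d", &start) // sketch only; errors ignored
	}
	end := start + maxResults
	if end > len(all) {
		end = len(all)
	}
	p := page{Names: all[start:end]}
	if end < len(all) {
		p.NextMarker = fmt.Sprintf("%d", end)
	}
	return p
}

func main() {
	blobs := []string{"a.txt", "b.txt", "c.txt", "d.txt", "e.txt"}
	marker := ""
	for {
		p := listPage(blobs, marker, 2)
		fmt.Println(p.Names)
		if p.NextMarker == "" {
			break // no continuation token: listing complete
		}
		marker = p.NextMarker // opaque to the client; just pass it back
	}
}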
-func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) { - if err := validate([]validation{ - {targetValue: maxresults, - constraints: []constraint{{target: "maxresults", name: null, rule: false, - chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ListBlobsFlatSegmentResponse), err -} - -// listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request. -func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if prefix != nil && len(*prefix) > 0 { - params.Set("prefix", *prefix) - } - if marker != nil && len(*marker) > 0 { - params.Set("marker", *marker) - } - if maxresults != nil { - params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) - } - if include != nil && len(include) > 0 { - params.Set("include", joinConst(include, ",")) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "list") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request. -func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container -// -// delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response -// body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the -// delimiter character. The delimiter may be a single character or a string. prefix is filters the results to return -// only containers whose name begins with the specified prefix. 
marker is a string value that identifies the portion of -// the list of containers to be returned with the next listing operation. The operation returns the NextMarker value -// within the response body if the listing operation did not return all containers remaining to be listed with the -// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request -// the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of -// containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server -// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will -// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the -// service will return fewer results than specified by maxresults, or than the default of 5000. include is include this -// parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed -// in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) { - if err := validate([]validation{ - {targetValue: maxresults, - constraints: []constraint{{target: "maxresults", name: null, rule: false, - chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ListBlobsHierarchySegmentResponse), err -} - -// listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request. 
-func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if prefix != nil && len(*prefix) > 0 { - params.Set("prefix", *prefix) - } - params.Set("delimiter", delimiter) - if marker != nil && len(*marker) > 0 { - params.Set("marker", *marker) - } - if maxresults != nil { - params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) - } - if include != nil && len(include) > 0 { - params.Set("include", joinConst(include, ",")) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "list") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. -func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be -// 15 to 60 seconds, or can be infinite -// -// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. -// For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. 
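For the hierarchy listing implemented above, the delimiter makes the service fold blob names that share a segment into a single BlobPrefix entry instead of returning each blob. A local illustration of that folding; hierarchize only mimics the server-side grouping and performs no service call:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// hierarchize illustrates the delimiter semantics: names containing the
// delimiter past the prefix collapse into one BlobPrefix entry, the rest
// are returned as blobs.
func hierarchize(names []string, prefix, delimiter string) (prefixes, blobs []string) {
	seen := map[string]bool{}
	for _, n := range names {
		if !strings.HasPrefix(n, prefix) {
			continue
		}
		rest := strings.TrimPrefix(n, prefix)
		if i := strings.Index(rest, delimiter); i >= 0 {
			p := prefix + rest[:i+len(delimiter)]
			if !seen[p] {
				seen[p] = true
				prefixes = append(prefixes, p)
			}
		} else {
			blobs = append(blobs, n)
		}
	}
	sort.Strings(prefixes)
	return prefixes, blobs
}

func main() {
	names := []string{"logs/2024/a.log", "logs/2025/b.log", "readme.md"}
	prefixes, blobs := hierarchize(names, "", "/")
	fmt.Println("BlobPrefix entries:", prefixes) // [logs/]
	fmt.Println("Blobs:", blobs)                 // [readme.md]
}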
-func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerReleaseLeaseResponse), err -} - -// releaseLeasePreparer prepares the ReleaseLease request. -func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "release") - return req, nil -} - -// releaseLeaseResponder handles the response to the ReleaseLease request. -func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err -} - -// Rename renames an existing container. -// -// sourceContainerName is required. Specifies the name of the container to rename. timeout is the timeout parameter is -// expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. sourceLeaseID is a -// lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. 
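All five container lease operations above share the PUT ?comp=lease&restype=container endpoint and differ only in the x-ms-lease-action value plus the action-specific headers each preparer sets. A small sketch summarizing that mapping; leaseHeaders is illustrative only:

package main

import "fmt"

// leaseHeaders sketches which headers accompany each x-ms-lease-action value
// on the shared lease endpoint, as seen in the preparers above.
func leaseHeaders(action string) []string {
	switch action {
	case "acquire":
		return []string{"x-ms-lease-duration", "x-ms-proposed-lease-id (optional)"}
	case "renew", "release":
		return []string{"x-ms-lease-id"}
	case "change":
		return []string{"x-ms-lease-id", "x-ms-proposed-lease-id"}
	case "break":
		return []string{"x-ms-lease-break-period (optional)"}
	}
	return nil
}

func main() {
	for _, a := range []string{"acquire", "renew", "change", "release", "break"} {
		fmt.Printf("%-8s -> %v\n", a, leaseHeaders(a))
	}
}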
-func (client containerClient) Rename(ctx context.Context, sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (*ContainerRenameResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.renamePreparer(sourceContainerName, timeout, requestID, sourceLeaseID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renameResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerRenameResponse), err -} - -// renamePreparer prepares the Rename request. -func (client containerClient) renamePreparer(sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "rename") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-source-container-name", sourceContainerName) - if sourceLeaseID != nil { - req.Header.Set("x-ms-source-lease-id", *sourceLeaseID) - } - return req, nil -} - -// renameResponder handles the response to the Rename request. -func (client containerClient) renameResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerRenameResponse{rawResponse: resp.Response()}, err -} - -// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 -// to 60 seconds, or can be infinite -// -// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. -// For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. 
-func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerRenewLeaseResponse), err -} - -// renewLeasePreparer prepares the RenewLease request. -func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "lease") - params.Set("restype", "container") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-lease-id", leaseID) - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-lease-action", "renew") - return req, nil -} - -// renewLeaseResponder handles the response to the RenewLease request. -func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err -} - -// Restore restores a previously-deleted container. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -// deletedContainerName is optional. Version 2019-12-12 and later. Specifies the name of the deleted container to -// restore. deletedContainerVersion is optional. Version 2019-12-12 and later. Specifies the version of the deleted -// container to restore. 
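Restore, documented above, maps to PUT ?restype=container&comp=undelete with the deleted container's name and version carried in headers. A stdlib sketch of that request; newRestoreRequest, the account URL, and the version string are placeholders (the real values come from a listing that includes deleted containers):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// newRestoreRequest mirrors restorePreparer: PUT ?restype=container&comp=undelete
// naming the deleted container and the version to restore.
func newRestoreRequest(containerURL, deletedName, deletedVersion string) (*http.Request, error) {
	u, err := url.Parse(containerURL)
	if err != nil {
		return nil, err
	}
	q := u.Query()
	q.Set("restype", "container")
	q.Set("comp", "undelete")
	u.RawQuery = q.Encode()
	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("x-ms-version", "2020-10-02")
	req.Header.Set("x-ms-deleted-container-name", deletedName)
	req.Header.Set("x-ms-deleted-container-version", deletedVersion)
	return req, nil
}

func main() {
	req, _ := newRestoreRequest("https://account.blob.core.windows.net/mycontainer",
		"mycontainer", "01D60F8BB59A4652") // placeholder version
	fmt.Println(req.Method, req.URL.String())
}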
-func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerRestoreResponse), err -} - -// restorePreparer prepares the Restore request. -func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "undelete") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if deletedContainerName != nil { - req.Header.Set("x-ms-deleted-container-name", *deletedContainerName) - } - if deletedContainerVersion != nil { - req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion) - } - return req, nil -} - -// restoreResponder handles the response to the Restore request. -func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerRestoreResponse{rawResponse: resp.Response()}, err -} - -// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a -// container may be accessed publicly. -// -// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and -// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified -// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has -// not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
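The containerACL parameter described above is marshalled into the SignedIdentifiers XML document and sent as the request body with Content-Type: application/xml. A sketch of producing that body; the lowercase types are local stand-ins for the package's SignedIdentifier types:

package main

import (
	"encoding/xml"
	"fmt"
)

// Stand-in types, enough to show the body that setAccessPolicyPreparer marshals.
type accessPolicy struct {
	Start      string `xml:"Start"`
	Expiry     string `xml:"Expiry"`
	Permission string `xml:"Permission"`
}

type signedIdentifier struct {
	ID           string       `xml:"Id"`
	AccessPolicy accessPolicy `xml:"AccessPolicy"`
}

type signedIdentifiers struct {
	XMLName xml.Name           `xml:"SignedIdentifiers"`
	Items   []signedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	acl := signedIdentifiers{Items: []signedIdentifier{{
		ID: "read-only-2024",
		AccessPolicy: accessPolicy{
			Start:      "2024-01-01T00:00:00Z",
			Expiry:     "2025-01-01T00:00:00Z",
			Permission: "rl", // read + list
		},
	}}}
	body, err := xml.Marshal(acl)
	if err != nil {
		panic(err)
	}
	// Sent as the PUT ?restype=container&comp=acl body.
	fmt.Println(string(body))
}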
-func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerSetAccessPolicyResponse), err -} - -// setAccessPolicyPreparer prepares the SetAccessPolicy request. -func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "acl") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if access != PublicAccessNone { - req.Header.Set("x-ms-blob-public-access", string(access)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) - if err != nil { - return req, pipeline.NewError(err, "failed to marshal request body") - } - req.Header.Set("Content-Type", "application/xml") - err = req.SetBody(bytes.NewReader(b)) - if err != nil { - return req, pipeline.NewError(err, "failed to set request body") - } - return req, nil -} - -// setAccessPolicyResponder handles the response to the SetAccessPolicy request. -func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err -} - -// SetMetadata operation sets one or more user-defined name-value pairs for the specified container. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with -// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to -// the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the -// specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version -// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only -// on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque -// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ContainerSetMetadataResponse), err -} - -// setMetadataPreparer prepares the SetMetadata request. -func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "metadata") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// setMetadataResponder handles the response to the SetMetadata request. -func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err -} - -// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. -// -// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an -// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be -// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. 
requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) - if err != nil { - return nil, err - } - return resp.(*SubmitBatchResponse), err -} - -// submitBatchPreparer prepares the SubmitBatch request. -func (client containerClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("POST", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "container") - params.Set("comp", "batch") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Header.Set("Content-Type", multipartContentType) - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// submitBatchResponder handles the response to the SubmitBatch request. -func (client containerClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - return &SubmitBatchResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go deleted file mode 100644 index ec872a3225a..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go +++ /dev/null @@ -1,7302 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/base64" - "encoding/xml" - "errors" - "io" - "net/http" - "reflect" - "strconv" - "strings" - "time" - "unsafe" -) - -// ETag is an entity tag. -type ETag string - -const ( - // ETagNone represents an empty entity tag. - ETagNone ETag = "" - - // ETagAny matches any entity tag. - ETagAny ETag = "*" -) - -// Metadata contains metadata key/value pairs. -type Metadata map[string]string - -const mdPrefix = "x-ms-meta-" - -const mdPrefixLen = len(mdPrefix) - -// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. 
-func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { - switch tt := t.(type) { - case xml.StartElement: - tokName = strings.ToLower(tt.Name.Local) - break - case xml.CharData: - if *md == nil { - *md = Metadata{} - } - (*md)[tokName] = string(tt) - break - } - } - return nil -} - -// Marker represents an opaque value used in paged responses. -type Marker struct { - Val *string -} - -// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true -// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from -// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only -// after the service has returned the final result portion. -func (m Marker) NotDone() bool { - return m.Val == nil || *m.Val != "" -} - -// UnmarshalXML implements the xml.Unmarshaler interface for Marker. -func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var out string - err := d.DecodeElement(&out, &start) - m.Val = &out - return err -} - -// concatenates a slice of const values with the specified separator between each item -func joinConst(s interface{}, sep string) string { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - panic("s wasn't a slice or array") - } - ss := make([]string, 0, v.Len()) - for i := 0; i < v.Len(); i++ { - ss = append(ss, v.Index(i).String()) - } - return strings.Join(ss, sep) -} - -func validateError(err error) { - if err != nil { - panic(err) - } -} - -// AccessTierType enumerates the values for access tier type. -type AccessTierType string - -const ( - // AccessTierArchive ... - AccessTierArchive AccessTierType = "Archive" - // AccessTierCool ... - AccessTierCool AccessTierType = "Cool" - // AccessTierHot ... - AccessTierHot AccessTierType = "Hot" - // AccessTierNone represents an empty AccessTierType. - AccessTierNone AccessTierType = "" - // AccessTierP10 ... - AccessTierP10 AccessTierType = "P10" - // AccessTierP15 ... - AccessTierP15 AccessTierType = "P15" - // AccessTierP20 ... - AccessTierP20 AccessTierType = "P20" - // AccessTierP30 ... - AccessTierP30 AccessTierType = "P30" - // AccessTierP4 ... - AccessTierP4 AccessTierType = "P4" - // AccessTierP40 ... - AccessTierP40 AccessTierType = "P40" - // AccessTierP50 ... - AccessTierP50 AccessTierType = "P50" - // AccessTierP6 ... - AccessTierP6 AccessTierType = "P6" - // AccessTierP60 ... - AccessTierP60 AccessTierType = "P60" - // AccessTierP70 ... - AccessTierP70 AccessTierType = "P70" - // AccessTierP80 ... - AccessTierP80 AccessTierType = "P80" -) - -// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. -func PossibleAccessTierTypeValues() []AccessTierType { - return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80} -} - -// AccountKindType enumerates the values for account kind type. -type AccountKindType string - -const ( - // AccountKindBlobStorage ... - AccountKindBlobStorage AccountKindType = "BlobStorage" - // AccountKindBlockBlobStorage ... 
- AccountKindBlockBlobStorage AccountKindType = "BlockBlobStorage" - // AccountKindFileStorage ... - AccountKindFileStorage AccountKindType = "FileStorage" - // AccountKindNone represents an empty AccountKindType. - AccountKindNone AccountKindType = "" - // AccountKindStorage ... - AccountKindStorage AccountKindType = "Storage" - // AccountKindStorageV2 ... - AccountKindStorageV2 AccountKindType = "StorageV2" -) - -// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. -func PossibleAccountKindTypeValues() []AccountKindType { - return []AccountKindType{AccountKindBlobStorage, AccountKindBlockBlobStorage, AccountKindFileStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} -} - -// ArchiveStatusType enumerates the values for archive status type. -type ArchiveStatusType string - -const ( - // ArchiveStatusNone represents an empty ArchiveStatusType. - ArchiveStatusNone ArchiveStatusType = "" - // ArchiveStatusRehydratePendingToCool ... - ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" - // ArchiveStatusRehydratePendingToHot ... - ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" -) - -// PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. -func PossibleArchiveStatusTypeValues() []ArchiveStatusType { - return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} -} - -// BlobDeleteType enumerates the values for blob delete type. -type BlobDeleteType string - -const ( - // BlobDeleteNone represents an empty BlobDeleteType. - BlobDeleteNone BlobDeleteType = "" - // BlobDeletePermanent ... - BlobDeletePermanent BlobDeleteType = "Permanent" -) - -// PossibleBlobDeleteTypeValues returns an array of possible values for the BlobDeleteType const type. -func PossibleBlobDeleteTypeValues() []BlobDeleteType { - return []BlobDeleteType{BlobDeleteNone, BlobDeletePermanent} -} - -// BlobExpiryOptionsType enumerates the values for blob expiry options type. -type BlobExpiryOptionsType string - -const ( - // BlobExpiryOptionsAbsolute ... - BlobExpiryOptionsAbsolute BlobExpiryOptionsType = "Absolute" - // BlobExpiryOptionsNeverExpire ... - BlobExpiryOptionsNeverExpire BlobExpiryOptionsType = "NeverExpire" - // BlobExpiryOptionsNone represents an empty BlobExpiryOptionsType. - BlobExpiryOptionsNone BlobExpiryOptionsType = "" - // BlobExpiryOptionsRelativeToCreation ... - BlobExpiryOptionsRelativeToCreation BlobExpiryOptionsType = "RelativeToCreation" - // BlobExpiryOptionsRelativeToNow ... - BlobExpiryOptionsRelativeToNow BlobExpiryOptionsType = "RelativeToNow" -) - -// PossibleBlobExpiryOptionsTypeValues returns an array of possible values for the BlobExpiryOptionsType const type. -func PossibleBlobExpiryOptionsTypeValues() []BlobExpiryOptionsType { - return []BlobExpiryOptionsType{BlobExpiryOptionsAbsolute, BlobExpiryOptionsNeverExpire, BlobExpiryOptionsNone, BlobExpiryOptionsRelativeToCreation, BlobExpiryOptionsRelativeToNow} -} - -// BlobImmutabilityPolicyModeType enumerates the values for blob immutability policy mode type. -type BlobImmutabilityPolicyModeType string - -const ( - // BlobImmutabilityPolicyModeLocked ... - BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyModeType = "locked" - // BlobImmutabilityPolicyModeMutable ... 
- BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyModeType = "mutable" - // BlobImmutabilityPolicyModeNone represents an empty BlobImmutabilityPolicyModeType. - BlobImmutabilityPolicyModeNone BlobImmutabilityPolicyModeType = "" - // BlobImmutabilityPolicyModeUnlocked ... - BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyModeType = "unlocked" -) - -// PossibleBlobImmutabilityPolicyModeTypeValues returns an array of possible values for the BlobImmutabilityPolicyModeType const type. -func PossibleBlobImmutabilityPolicyModeTypeValues() []BlobImmutabilityPolicyModeType { - return []BlobImmutabilityPolicyModeType{BlobImmutabilityPolicyModeLocked, BlobImmutabilityPolicyModeMutable, BlobImmutabilityPolicyModeNone, BlobImmutabilityPolicyModeUnlocked} -} - -// BlobType enumerates the values for blob type. -type BlobType string - -const ( - // BlobAppendBlob ... - BlobAppendBlob BlobType = "AppendBlob" - // BlobBlockBlob ... - BlobBlockBlob BlobType = "BlockBlob" - // BlobNone represents an empty BlobType. - BlobNone BlobType = "" - // BlobPageBlob ... - BlobPageBlob BlobType = "PageBlob" -) - -// PossibleBlobTypeValues returns an array of possible values for the BlobType const type. -func PossibleBlobTypeValues() []BlobType { - return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} -} - -// BlockListType enumerates the values for block list type. -type BlockListType string - -const ( - // BlockListAll ... - BlockListAll BlockListType = "all" - // BlockListCommitted ... - BlockListCommitted BlockListType = "committed" - // BlockListNone represents an empty BlockListType. - BlockListNone BlockListType = "" - // BlockListUncommitted ... - BlockListUncommitted BlockListType = "uncommitted" -) - -// PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. -func PossibleBlockListTypeValues() []BlockListType { - return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} -} - -// CopyStatusType enumerates the values for copy status type. -type CopyStatusType string - -const ( - // CopyStatusAborted ... - CopyStatusAborted CopyStatusType = "aborted" - // CopyStatusFailed ... - CopyStatusFailed CopyStatusType = "failed" - // CopyStatusNone represents an empty CopyStatusType. - CopyStatusNone CopyStatusType = "" - // CopyStatusPending ... - CopyStatusPending CopyStatusType = "pending" - // CopyStatusSuccess ... - CopyStatusSuccess CopyStatusType = "success" -) - -// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. -func PossibleCopyStatusTypeValues() []CopyStatusType { - return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} -} - -// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. -type DeleteSnapshotsOptionType string - -const ( - // DeleteSnapshotsOptionInclude ... - DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" - // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. - DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" - // DeleteSnapshotsOptionOnly ... - DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" -) - -// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. 
-func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { - return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} -} - -// EncryptionAlgorithmType enumerates the values for encryption algorithm type. -type EncryptionAlgorithmType string - -const ( - // EncryptionAlgorithmAES256 ... - EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256" - // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType. - EncryptionAlgorithmNone EncryptionAlgorithmType = "" -) - -// PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type. -func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { - return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone} -} - -// GeoReplicationStatusType enumerates the values for geo replication status type. -type GeoReplicationStatusType string - -const ( - // GeoReplicationStatusBootstrap ... - GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" - // GeoReplicationStatusLive ... - GeoReplicationStatusLive GeoReplicationStatusType = "live" - // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. - GeoReplicationStatusNone GeoReplicationStatusType = "" - // GeoReplicationStatusUnavailable ... - GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" -) - -// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. -func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { - return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} -} - -// LeaseDurationType enumerates the values for lease duration type. -type LeaseDurationType string - -const ( - // LeaseDurationFixed ... - LeaseDurationFixed LeaseDurationType = "fixed" - // LeaseDurationInfinite ... - LeaseDurationInfinite LeaseDurationType = "infinite" - // LeaseDurationNone represents an empty LeaseDurationType. - LeaseDurationNone LeaseDurationType = "" -) - -// PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. -func PossibleLeaseDurationTypeValues() []LeaseDurationType { - return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} -} - -// LeaseStateType enumerates the values for lease state type. -type LeaseStateType string - -const ( - // LeaseStateAvailable ... - LeaseStateAvailable LeaseStateType = "available" - // LeaseStateBreaking ... - LeaseStateBreaking LeaseStateType = "breaking" - // LeaseStateBroken ... - LeaseStateBroken LeaseStateType = "broken" - // LeaseStateExpired ... - LeaseStateExpired LeaseStateType = "expired" - // LeaseStateLeased ... - LeaseStateLeased LeaseStateType = "leased" - // LeaseStateNone represents an empty LeaseStateType. - LeaseStateNone LeaseStateType = "" -) - -// PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. -func PossibleLeaseStateTypeValues() []LeaseStateType { - return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} -} - -// LeaseStatusType enumerates the values for lease status type. -type LeaseStatusType string - -const ( - // LeaseStatusLocked ... 
- LeaseStatusLocked LeaseStatusType = "locked" - // LeaseStatusNone represents an empty LeaseStatusType. - LeaseStatusNone LeaseStatusType = "" - // LeaseStatusUnlocked ... - LeaseStatusUnlocked LeaseStatusType = "unlocked" -) - -// PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. -func PossibleLeaseStatusTypeValues() []LeaseStatusType { - return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} -} - -// ListBlobsIncludeItemType enumerates the values for list blobs include item type. -type ListBlobsIncludeItemType string - -const ( - // ListBlobsIncludeItemCopy ... - ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" - // ListBlobsIncludeItemDeleted ... - ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" - // ListBlobsIncludeItemDeletedwithversions ... - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItemType = "deletedwithversions" - // ListBlobsIncludeItemImmutabilitypolicy ... - ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItemType = "immutabilitypolicy" - // ListBlobsIncludeItemLegalhold ... - ListBlobsIncludeItemLegalhold ListBlobsIncludeItemType = "legalhold" - // ListBlobsIncludeItemMetadata ... - ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" - // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. - ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" - // ListBlobsIncludeItemPermissions ... - ListBlobsIncludeItemPermissions ListBlobsIncludeItemType = "permissions" - // ListBlobsIncludeItemSnapshots ... - ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" - // ListBlobsIncludeItemTags ... - ListBlobsIncludeItemTags ListBlobsIncludeItemType = "tags" - // ListBlobsIncludeItemUncommittedblobs ... - ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" - // ListBlobsIncludeItemVersions ... - ListBlobsIncludeItemVersions ListBlobsIncludeItemType = "versions" -) - -// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. -func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { - return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemDeletedwithversions, ListBlobsIncludeItemImmutabilitypolicy, ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemPermissions, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions} -} - -// ListContainersIncludeType enumerates the values for list containers include type. -type ListContainersIncludeType string - -const ( - // ListContainersIncludeDeleted ... - ListContainersIncludeDeleted ListContainersIncludeType = "deleted" - // ListContainersIncludeMetadata ... - ListContainersIncludeMetadata ListContainersIncludeType = "metadata" - // ListContainersIncludeNone represents an empty ListContainersIncludeType. - ListContainersIncludeNone ListContainersIncludeType = "" - // ListContainersIncludeSystem ... - ListContainersIncludeSystem ListContainersIncludeType = "system" -) - -// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. 
-func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { - return []ListContainersIncludeType{ListContainersIncludeDeleted, ListContainersIncludeMetadata, ListContainersIncludeNone, ListContainersIncludeSystem} -} - -// PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type. -type PremiumPageBlobAccessTierType string - -const ( - // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType. - PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = "" - // PremiumPageBlobAccessTierP10 ... - PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10" - // PremiumPageBlobAccessTierP15 ... - PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15" - // PremiumPageBlobAccessTierP20 ... - PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20" - // PremiumPageBlobAccessTierP30 ... - PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30" - // PremiumPageBlobAccessTierP4 ... - PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4" - // PremiumPageBlobAccessTierP40 ... - PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40" - // PremiumPageBlobAccessTierP50 ... - PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50" - // PremiumPageBlobAccessTierP6 ... - PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6" - // PremiumPageBlobAccessTierP60 ... - PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60" - // PremiumPageBlobAccessTierP70 ... - PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70" - // PremiumPageBlobAccessTierP80 ... - PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80" -) - -// PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type. -func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType { - return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80} -} - -// PublicAccessType enumerates the values for public access type. -type PublicAccessType string - -const ( - // PublicAccessBlob ... - PublicAccessBlob PublicAccessType = "blob" - // PublicAccessContainer ... - PublicAccessContainer PublicAccessType = "container" - // PublicAccessNone represents an empty PublicAccessType. - PublicAccessNone PublicAccessType = "" -) - -// PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. -func PossiblePublicAccessTypeValues() []PublicAccessType { - return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} -} - -// QueryFormatType enumerates the values for query format type. -type QueryFormatType string - -const ( - // QueryFormatArrow ... - QueryFormatArrow QueryFormatType = "arrow" - // QueryFormatDelimited ... - QueryFormatDelimited QueryFormatType = "delimited" - // QueryFormatJSON ... - QueryFormatJSON QueryFormatType = "json" - // QueryFormatNone represents an empty QueryFormatType. - QueryFormatNone QueryFormatType = "" - // QueryFormatParquet ... 
- QueryFormatParquet QueryFormatType = "parquet" -) - -// PossibleQueryFormatTypeValues returns an array of possible values for the QueryFormatType const type. -func PossibleQueryFormatTypeValues() []QueryFormatType { - return []QueryFormatType{QueryFormatArrow, QueryFormatDelimited, QueryFormatJSON, QueryFormatNone, QueryFormatParquet} -} - -// RehydratePriorityType enumerates the values for rehydrate priority type. -type RehydratePriorityType string - -const ( - // RehydratePriorityHigh ... - RehydratePriorityHigh RehydratePriorityType = "High" - // RehydratePriorityNone represents an empty RehydratePriorityType. - RehydratePriorityNone RehydratePriorityType = "" - // RehydratePriorityStandard ... - RehydratePriorityStandard RehydratePriorityType = "Standard" -) - -// PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type. -func PossibleRehydratePriorityTypeValues() []RehydratePriorityType { - return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard} -} - -// SequenceNumberActionType enumerates the values for sequence number action type. -type SequenceNumberActionType string - -const ( - // SequenceNumberActionIncrement ... - SequenceNumberActionIncrement SequenceNumberActionType = "increment" - // SequenceNumberActionMax ... - SequenceNumberActionMax SequenceNumberActionType = "max" - // SequenceNumberActionNone represents an empty SequenceNumberActionType. - SequenceNumberActionNone SequenceNumberActionType = "" - // SequenceNumberActionUpdate ... - SequenceNumberActionUpdate SequenceNumberActionType = "update" -) - -// PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. -func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { - return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} -} - -// SkuNameType enumerates the values for sku name type. -type SkuNameType string - -const ( - // SkuNameNone represents an empty SkuNameType. - SkuNameNone SkuNameType = "" - // SkuNamePremiumLRS ... - SkuNamePremiumLRS SkuNameType = "Premium_LRS" - // SkuNameStandardGRS ... - SkuNameStandardGRS SkuNameType = "Standard_GRS" - // SkuNameStandardLRS ... - SkuNameStandardLRS SkuNameType = "Standard_LRS" - // SkuNameStandardRAGRS ... - SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS" - // SkuNameStandardZRS ... - SkuNameStandardZRS SkuNameType = "Standard_ZRS" -) - -// PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type. -func PossibleSkuNameTypeValues() []SkuNameType { - return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS} -} - -// StorageErrorCodeType enumerates the values for storage error code type. -type StorageErrorCodeType string - -const ( - // StorageErrorCodeAccountAlreadyExists ... - StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" - // StorageErrorCodeAccountBeingCreated ... - StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" - // StorageErrorCodeAccountIsDisabled ... - StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" - // StorageErrorCodeAppendPositionConditionNotMet ... 
- StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" - // StorageErrorCodeAuthenticationFailed ... - StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" - // StorageErrorCodeAuthorizationFailure ... - StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure" - // StorageErrorCodeAuthorizationPermissionMismatch ... - StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch" - // StorageErrorCodeAuthorizationProtocolMismatch ... - StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch" - // StorageErrorCodeAuthorizationResourceTypeMismatch ... - StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch" - // StorageErrorCodeAuthorizationServiceMismatch ... - StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch" - // StorageErrorCodeAuthorizationSourceIPMismatch ... - StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch" - // StorageErrorCodeBlobAlreadyExists ... - StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" - // StorageErrorCodeBlobArchived ... - StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" - // StorageErrorCodeBlobBeingRehydrated ... - StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" - // StorageErrorCodeBlobImmutableDueToPolicy ... - StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCodeType = "BlobImmutableDueToPolicy" - // StorageErrorCodeBlobNotArchived ... - StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" - // StorageErrorCodeBlobNotFound ... - StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" - // StorageErrorCodeBlobOverwritten ... - StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" - // StorageErrorCodeBlobTierInadequateForContentLength ... - StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" - // StorageErrorCodeBlobUsesCustomerSpecifiedEncryption ... - StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCodeType = "BlobUsesCustomerSpecifiedEncryption" - // StorageErrorCodeBlockCountExceedsLimit ... - StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" - // StorageErrorCodeBlockListTooLong ... - StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" - // StorageErrorCodeCannotChangeToLowerTier ... - StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" - // StorageErrorCodeCannotVerifyCopySource ... - StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" - // StorageErrorCodeConditionHeadersNotSupported ... - StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" - // StorageErrorCodeConditionNotMet ... - StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" - // StorageErrorCodeContainerAlreadyExists ... - StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" - // StorageErrorCodeContainerBeingDeleted ... - StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" - // StorageErrorCodeContainerDisabled ... 
- StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" - // StorageErrorCodeContainerNotFound ... - StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" - // StorageErrorCodeContentLengthLargerThanTierLimit ... - StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" - // StorageErrorCodeCopyAcrossAccountsNotSupported ... - StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" - // StorageErrorCodeCopyIDMismatch ... - StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" - // StorageErrorCodeEmptyMetadataKey ... - StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" - // StorageErrorCodeFeatureVersionMismatch ... - StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" - // StorageErrorCodeIncrementalCopyBlobMismatch ... - StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" - // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... - StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" - // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... - StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" - // StorageErrorCodeInfiniteLeaseDurationRequired ... - StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" - // StorageErrorCodeInsufficientAccountPermissions ... - StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" - // StorageErrorCodeInternalError ... - StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" - // StorageErrorCodeInvalidAuthenticationInfo ... - StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" - // StorageErrorCodeInvalidBlobOrBlock ... - StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" - // StorageErrorCodeInvalidBlobTier ... - StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" - // StorageErrorCodeInvalidBlobType ... - StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" - // StorageErrorCodeInvalidBlockID ... - StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" - // StorageErrorCodeInvalidBlockList ... - StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" - // StorageErrorCodeInvalidHeaderValue ... - StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" - // StorageErrorCodeInvalidHTTPVerb ... - StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" - // StorageErrorCodeInvalidInput ... - StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" - // StorageErrorCodeInvalidMd5 ... - StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" - // StorageErrorCodeInvalidMetadata ... - StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" - // StorageErrorCodeInvalidOperation ... - StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" - // StorageErrorCodeInvalidPageRange ... - StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" - // StorageErrorCodeInvalidQueryParameterValue ... 
- StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" - // StorageErrorCodeInvalidRange ... - StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" - // StorageErrorCodeInvalidResourceName ... - StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" - // StorageErrorCodeInvalidSourceBlobType ... - StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" - // StorageErrorCodeInvalidSourceBlobURL ... - StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" - // StorageErrorCodeInvalidURI ... - StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" - // StorageErrorCodeInvalidVersionForPageBlobOperation ... - StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" - // StorageErrorCodeInvalidXMLDocument ... - StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" - // StorageErrorCodeInvalidXMLNodeValue ... - StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" - // StorageErrorCodeLeaseAlreadyBroken ... - StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" - // StorageErrorCodeLeaseAlreadyPresent ... - StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" - // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... - StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" - // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... - StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" - // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... - StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" - // StorageErrorCodeLeaseIDMissing ... - StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" - // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... - StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" - // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... - StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" - // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... - StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" - // StorageErrorCodeLeaseLost ... - StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" - // StorageErrorCodeLeaseNotPresentWithBlobOperation ... - StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" - // StorageErrorCodeLeaseNotPresentWithContainerOperation ... - StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" - // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... - StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" - // StorageErrorCodeMaxBlobSizeConditionNotMet ... - StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" - // StorageErrorCodeMd5Mismatch ... - StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" - // StorageErrorCodeMetadataTooLarge ... 
- StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" - // StorageErrorCodeMissingContentLengthHeader ... - StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" - // StorageErrorCodeMissingRequiredHeader ... - StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" - // StorageErrorCodeMissingRequiredQueryParameter ... - StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" - // StorageErrorCodeMissingRequiredXMLNode ... - StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" - // StorageErrorCodeMultipleConditionHeadersNotSupported ... - StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" - // StorageErrorCodeNoAuthenticationInformation ... - StorageErrorCodeNoAuthenticationInformation StorageErrorCodeType = "NoAuthenticationInformation" - // StorageErrorCodeNone represents an empty StorageErrorCodeType. - StorageErrorCodeNone StorageErrorCodeType = "" - // StorageErrorCodeNoPendingCopyOperation ... - StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" - // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... - StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" - // StorageErrorCodeOperationTimedOut ... - StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" - // StorageErrorCodeOutOfRangeInput ... - StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" - // StorageErrorCodeOutOfRangeQueryParameterValue ... - StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" - // StorageErrorCodePendingCopyOperation ... - StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" - // StorageErrorCodePreviousSnapshotCannotBeNewer ... - StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" - // StorageErrorCodePreviousSnapshotNotFound ... - StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" - // StorageErrorCodePreviousSnapshotOperationNotSupported ... - StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" - // StorageErrorCodeRequestBodyTooLarge ... - StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" - // StorageErrorCodeRequestURLFailedToParse ... - StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" - // StorageErrorCodeResourceAlreadyExists ... - StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" - // StorageErrorCodeResourceNotFound ... - StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" - // StorageErrorCodeResourceTypeMismatch ... - StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" - // StorageErrorCodeSequenceNumberConditionNotMet ... - StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" - // StorageErrorCodeSequenceNumberIncrementTooLarge ... - StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" - // StorageErrorCodeServerBusy ... 
- StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" - // StorageErrorCodeSnapshotCountExceeded ... - StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" - // StorageErrorCodeSnapshotOperationRateExceeded ... - StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCodeType = "SnapshotOperationRateExceeded" - // StorageErrorCodeSnapshotsPresent ... - StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" - // StorageErrorCodeSourceConditionNotMet ... - StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" - // StorageErrorCodeSystemInUse ... - StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" - // StorageErrorCodeTargetConditionNotMet ... - StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" - // StorageErrorCodeUnauthorizedBlobOverwrite ... - StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" - // StorageErrorCodeUnsupportedHeader ... - StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" - // StorageErrorCodeUnsupportedHTTPVerb ... - StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" - // StorageErrorCodeUnsupportedQueryParameter ... - StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" - // StorageErrorCodeUnsupportedXMLNode ... - StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" -) - -// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. -func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { - return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobImmutableDueToPolicy, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, 
StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNoAuthenticationInformation, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotOperationRateExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} -} - -// SyncCopyStatusType enumerates the values for sync copy status type. -type SyncCopyStatusType string - -const ( - // SyncCopyStatusNone represents an empty SyncCopyStatusType. - SyncCopyStatusNone SyncCopyStatusType = "" - // SyncCopyStatusSuccess ... - SyncCopyStatusSuccess SyncCopyStatusType = "success" -) - -// PossibleSyncCopyStatusTypeValues returns an array of possible values for the SyncCopyStatusType const type. 
-func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType { - return []SyncCopyStatusType{SyncCopyStatusNone, SyncCopyStatusSuccess} -} - -// AccessPolicy - An Access policy -type AccessPolicy struct { - // Start - the date-time the policy is active - Start *time.Time `xml:"Start"` - // Expiry - the date-time the policy expires - Expiry *time.Time `xml:"Expiry"` - // Permission - the permissions for the acl policy - Permission *string `xml:"Permission"` -} - -// MarshalXML implements the xml.Marshaler interface for AccessPolicy. -func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) - return e.EncodeElement(*ap2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. -func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - ap2 := (*accessPolicy)(unsafe.Pointer(ap)) - return d.DecodeElement(ap2, &start) -} - -// AppendBlobAppendBlockFromURLResponse ... -type AppendBlobAppendBlockFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (ababfur AppendBlobAppendBlockFromURLResponse) Response() *http.Response { - return ababfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (ababfur AppendBlobAppendBlockFromURLResponse) StatusCode() int { - return ababfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (ababfur AppendBlobAppendBlockFromURLResponse) Status() string { - return ababfur.rawResponse.Status -} - -// BlobAppendOffset returns the value for header x-ms-blob-append-offset. -func (ababfur AppendBlobAppendBlockFromURLResponse) BlobAppendOffset() string { - return ababfur.rawResponse.Header.Get("x-ms-blob-append-offset") -} - -// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. -func (ababfur AppendBlobAppendBlockFromURLResponse) BlobCommittedBlockCount() int32 { - s := ababfur.rawResponse.Header.Get("x-ms-blob-committed-block-count") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// ContentMD5 returns the value for header Content-MD5. -func (ababfur AppendBlobAppendBlockFromURLResponse) ContentMD5() []byte { - s := ababfur.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time { - s := ababfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string { - return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionScope() string { - return ababfur.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string { - return ababfur.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. 
-func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag { - return ETag(ababfur.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string { - return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time { - s := ababfur.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (ababfur AppendBlobAppendBlockFromURLResponse) RequestID() string { - return ababfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string { - return ababfur.rawResponse.Header.Get("x-ms-version") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte { - s := ababfur.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// AppendBlobAppendBlockResponse ... -type AppendBlobAppendBlockResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { - return ababr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (ababr AppendBlobAppendBlockResponse) StatusCode() int { - return ababr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (ababr AppendBlobAppendBlockResponse) Status() string { - return ababr.rawResponse.Status -} - -// BlobAppendOffset returns the value for header x-ms-blob-append-offset. -func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { - return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") -} - -// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. -func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { - s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string { - return ababr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { - s := ababr.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (ababr AppendBlobAppendBlockResponse) Date() time.Time { - s := ababr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
-func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string { - return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (ababr AppendBlobAppendBlockResponse) EncryptionScope() string { - return ababr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { - return ababr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (ababr AppendBlobAppendBlockResponse) ETag() ETag { - return ETag(ababr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (ababr AppendBlobAppendBlockResponse) IsServerEncrypted() string { - return ababr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { - s := ababr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (ababr AppendBlobAppendBlockResponse) RequestID() string { - return ababr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (ababr AppendBlobAppendBlockResponse) Version() string { - return ababr.rawResponse.Header.Get("x-ms-version") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte { - s := ababr.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// AppendBlobCreateResponse ... -type AppendBlobCreateResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (abcr AppendBlobCreateResponse) Response() *http.Response { - return abcr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (abcr AppendBlobCreateResponse) StatusCode() int { - return abcr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (abcr AppendBlobCreateResponse) Status() string { - return abcr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (abcr AppendBlobCreateResponse) ClientRequestID() string { - return abcr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (abcr AppendBlobCreateResponse) ContentMD5() []byte { - s := abcr.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (abcr AppendBlobCreateResponse) Date() time.Time { - s := abcr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
-func (abcr AppendBlobCreateResponse) EncryptionKeySha256() string { - return abcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (abcr AppendBlobCreateResponse) EncryptionScope() string { - return abcr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (abcr AppendBlobCreateResponse) ErrorCode() string { - return abcr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (abcr AppendBlobCreateResponse) ETag() ETag { - return ETag(abcr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (abcr AppendBlobCreateResponse) IsServerEncrypted() string { - return abcr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (abcr AppendBlobCreateResponse) LastModified() time.Time { - s := abcr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (abcr AppendBlobCreateResponse) RequestID() string { - return abcr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (abcr AppendBlobCreateResponse) Version() string { - return abcr.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (abcr AppendBlobCreateResponse) VersionID() string { - return abcr.rawResponse.Header.Get("x-ms-version-id") -} - -// AppendBlobSealResponse ... -type AppendBlobSealResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (absr AppendBlobSealResponse) Response() *http.Response { - return absr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (absr AppendBlobSealResponse) StatusCode() int { - return absr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (absr AppendBlobSealResponse) Status() string { - return absr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (absr AppendBlobSealResponse) ClientRequestID() string { - return absr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (absr AppendBlobSealResponse) Date() time.Time { - s := absr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (absr AppendBlobSealResponse) ErrorCode() string { - return absr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (absr AppendBlobSealResponse) ETag() ETag { - return ETag(absr.rawResponse.Header.Get("ETag")) -} - -// IsSealed returns the value for header x-ms-blob-sealed. -func (absr AppendBlobSealResponse) IsSealed() string { - return absr.rawResponse.Header.Get("x-ms-blob-sealed") -} - -// LastModified returns the value for header Last-Modified. 
-func (absr AppendBlobSealResponse) LastModified() time.Time { - s := absr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (absr AppendBlobSealResponse) RequestID() string { - return absr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (absr AppendBlobSealResponse) Version() string { - return absr.rawResponse.Header.Get("x-ms-version") -} - -// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow -// formatted. -type ArrowConfiguration struct { - Schema []ArrowField `xml:"Schema>Field"` -} - -// ArrowField - Groups settings regarding specific field of an arrow schema -type ArrowField struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Field"` - Type string `xml:"Type"` - Name *string `xml:"Name"` - Precision *int32 `xml:"Precision"` - Scale *int32 `xml:"Scale"` -} - -// BlobAbortCopyFromURLResponse ... -type BlobAbortCopyFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { - return bacfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { - return bacfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bacfur BlobAbortCopyFromURLResponse) Status() string { - return bacfur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string { - return bacfur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { - s := bacfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { - return bacfur.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bacfur BlobAbortCopyFromURLResponse) RequestID() string { - return bacfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bacfur BlobAbortCopyFromURLResponse) Version() string { - return bacfur.rawResponse.Header.Get("x-ms-version") -} - -// BlobAcquireLeaseResponse ... -type BlobAcquireLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (balr BlobAcquireLeaseResponse) Response() *http.Response { - return balr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (balr BlobAcquireLeaseResponse) StatusCode() int { - return balr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (balr BlobAcquireLeaseResponse) Status() string { - return balr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. 
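// ArrowConfiguration above leans on an encoding/xml path tag: `xml:"Schema>Field"`
// nests every <Field> element under a single <Schema> wrapper. A small sketch
// with local copies of the two structs (trimmed to the fields needed here):

package main

import (
	"encoding/xml"
	"fmt"
)

type arrowField struct {
	XMLName xml.Name `xml:"Field"`
	Type    string   `xml:"Type"`
	Name    *string  `xml:"Name"`
}

type arrowConfiguration struct {
	Schema []arrowField `xml:"Schema>Field"`
}

func main() {
	n := "count"
	out, _ := xml.Marshal(arrowConfiguration{Schema: []arrowField{{Type: "int64", Name: &n}}})
	fmt.Println(string(out))
	// <arrowConfiguration><Schema><Field><Type>int64</Type><Name>count</Name></Field></Schema></arrowConfiguration>
}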
-func (balr BlobAcquireLeaseResponse) ClientRequestID() string { - return balr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (balr BlobAcquireLeaseResponse) Date() time.Time { - s := balr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (balr BlobAcquireLeaseResponse) ErrorCode() string { - return balr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (balr BlobAcquireLeaseResponse) ETag() ETag { - return ETag(balr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (balr BlobAcquireLeaseResponse) LastModified() time.Time { - s := balr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (balr BlobAcquireLeaseResponse) LeaseID() string { - return balr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (balr BlobAcquireLeaseResponse) RequestID() string { - return balr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (balr BlobAcquireLeaseResponse) Version() string { - return balr.rawResponse.Header.Get("x-ms-version") -} - -// BlobBreakLeaseResponse ... -type BlobBreakLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bblr BlobBreakLeaseResponse) Response() *http.Response { - return bblr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bblr BlobBreakLeaseResponse) StatusCode() int { - return bblr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bblr BlobBreakLeaseResponse) Status() string { - return bblr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bblr BlobBreakLeaseResponse) ClientRequestID() string { - return bblr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bblr BlobBreakLeaseResponse) Date() time.Time { - s := bblr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bblr BlobBreakLeaseResponse) ErrorCode() string { - return bblr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bblr BlobBreakLeaseResponse) ETag() ETag { - return ETag(bblr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bblr BlobBreakLeaseResponse) LastModified() time.Time { - s := bblr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseTime returns the value for header x-ms-lease-time. 
-func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { - s := bblr.rawResponse.Header.Get("x-ms-lease-time") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// RequestID returns the value for header x-ms-request-id. -func (bblr BlobBreakLeaseResponse) RequestID() string { - return bblr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bblr BlobBreakLeaseResponse) Version() string { - return bblr.rawResponse.Header.Get("x-ms-version") -} - -// BlobChangeLeaseResponse ... -type BlobChangeLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bclr BlobChangeLeaseResponse) Response() *http.Response { - return bclr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bclr BlobChangeLeaseResponse) StatusCode() int { - return bclr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bclr BlobChangeLeaseResponse) Status() string { - return bclr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bclr BlobChangeLeaseResponse) ClientRequestID() string { - return bclr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bclr BlobChangeLeaseResponse) Date() time.Time { - s := bclr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bclr BlobChangeLeaseResponse) ErrorCode() string { - return bclr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bclr BlobChangeLeaseResponse) ETag() ETag { - return ETag(bclr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bclr BlobChangeLeaseResponse) LastModified() time.Time { - s := bclr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (bclr BlobChangeLeaseResponse) LeaseID() string { - return bclr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (bclr BlobChangeLeaseResponse) RequestID() string { - return bclr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bclr BlobChangeLeaseResponse) Version() string { - return bclr.rawResponse.Header.Get("x-ms-version") -} - -// BlobCopyFromURLResponse ... -type BlobCopyFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bcfur BlobCopyFromURLResponse) Response() *http.Response { - return bcfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bcfur BlobCopyFromURLResponse) StatusCode() int { - return bcfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bcfur BlobCopyFromURLResponse) Status() string { - return bcfur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. 
-func (bcfur BlobCopyFromURLResponse) ClientRequestID() string { - return bcfur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte { - s := bcfur.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// CopyID returns the value for header x-ms-copy-id. -func (bcfur BlobCopyFromURLResponse) CopyID() string { - return bcfur.rawResponse.Header.Get("x-ms-copy-id") -} - -// CopyStatus returns the value for header x-ms-copy-status. -func (bcfur BlobCopyFromURLResponse) CopyStatus() SyncCopyStatusType { - return SyncCopyStatusType(bcfur.rawResponse.Header.Get("x-ms-copy-status")) -} - -// Date returns the value for header Date. -func (bcfur BlobCopyFromURLResponse) Date() time.Time { - s := bcfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bcfur BlobCopyFromURLResponse) ErrorCode() string { - return bcfur.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bcfur BlobCopyFromURLResponse) ETag() ETag { - return ETag(bcfur.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bcfur BlobCopyFromURLResponse) LastModified() time.Time { - s := bcfur.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bcfur BlobCopyFromURLResponse) RequestID() string { - return bcfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bcfur BlobCopyFromURLResponse) Version() string { - return bcfur.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bcfur BlobCopyFromURLResponse) VersionID() string { - return bcfur.rawResponse.Header.Get("x-ms-version-id") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte { - s := bcfur.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// BlobCreateSnapshotResponse ... -type BlobCreateSnapshotResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { - return bcsr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bcsr BlobCreateSnapshotResponse) StatusCode() int { - return bcsr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bcsr BlobCreateSnapshotResponse) Status() string { - return bcsr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string { - return bcsr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. 
-func (bcsr BlobCreateSnapshotResponse) Date() time.Time { - s := bcsr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { - return bcsr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bcsr BlobCreateSnapshotResponse) ETag() ETag { - return ETag(bcsr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string { - return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { - s := bcsr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bcsr BlobCreateSnapshotResponse) RequestID() string { - return bcsr.rawResponse.Header.Get("x-ms-request-id") -} - -// Snapshot returns the value for header x-ms-snapshot. -func (bcsr BlobCreateSnapshotResponse) Snapshot() string { - return bcsr.rawResponse.Header.Get("x-ms-snapshot") -} - -// Version returns the value for header x-ms-version. -func (bcsr BlobCreateSnapshotResponse) Version() string { - return bcsr.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bcsr BlobCreateSnapshotResponse) VersionID() string { - return bcsr.rawResponse.Header.Get("x-ms-version-id") -} - -// BlobDeleteImmutabilityPolicyResponse ... -type BlobDeleteImmutabilityPolicyResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bdipr BlobDeleteImmutabilityPolicyResponse) Response() *http.Response { - return bdipr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bdipr BlobDeleteImmutabilityPolicyResponse) StatusCode() int { - return bdipr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bdipr BlobDeleteImmutabilityPolicyResponse) Status() string { - return bdipr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bdipr BlobDeleteImmutabilityPolicyResponse) ClientRequestID() string { - return bdipr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bdipr BlobDeleteImmutabilityPolicyResponse) Date() time.Time { - s := bdipr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bdipr BlobDeleteImmutabilityPolicyResponse) ErrorCode() string { - return bdipr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bdipr BlobDeleteImmutabilityPolicyResponse) RequestID() string { - return bdipr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
-func (bdipr BlobDeleteImmutabilityPolicyResponse) Version() string { - return bdipr.rawResponse.Header.Get("x-ms-version") -} - -// BlobDeleteResponse ... -type BlobDeleteResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bdr BlobDeleteResponse) Response() *http.Response { - return bdr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bdr BlobDeleteResponse) StatusCode() int { - return bdr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bdr BlobDeleteResponse) Status() string { - return bdr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bdr BlobDeleteResponse) ClientRequestID() string { - return bdr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bdr BlobDeleteResponse) Date() time.Time { - s := bdr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bdr BlobDeleteResponse) ErrorCode() string { - return bdr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bdr BlobDeleteResponse) RequestID() string { - return bdr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bdr BlobDeleteResponse) Version() string { - return bdr.rawResponse.Header.Get("x-ms-version") -} - -// BlobFlatListSegment ... -type BlobFlatListSegment struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Blobs"` - BlobItems []BlobItemInternal `xml:"Blob"` -} - -// BlobGetAccountInfoResponse ... -type BlobGetAccountInfoResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bgair BlobGetAccountInfoResponse) Response() *http.Response { - return bgair.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bgair BlobGetAccountInfoResponse) StatusCode() int { - return bgair.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bgair BlobGetAccountInfoResponse) Status() string { - return bgair.rawResponse.Status -} - -// AccountKind returns the value for header x-ms-account-kind. -func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType { - return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind")) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bgair BlobGetAccountInfoResponse) ClientRequestID() string { - return bgair.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bgair BlobGetAccountInfoResponse) Date() time.Time { - s := bgair.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bgair BlobGetAccountInfoResponse) ErrorCode() string { - return bgair.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. 
-func (bgair BlobGetAccountInfoResponse) RequestID() string { - return bgair.rawResponse.Header.Get("x-ms-request-id") -} - -// SkuName returns the value for header x-ms-sku-name. -func (bgair BlobGetAccountInfoResponse) SkuName() SkuNameType { - return SkuNameType(bgair.rawResponse.Header.Get("x-ms-sku-name")) -} - -// Version returns the value for header x-ms-version. -func (bgair BlobGetAccountInfoResponse) Version() string { - return bgair.rawResponse.Header.Get("x-ms-version") -} - -// BlobGetPropertiesResponse ... -type BlobGetPropertiesResponse struct { - rawResponse *http.Response -} - -// NewMetadata returns user-defined key/value pairs. -func (bgpr BlobGetPropertiesResponse) NewMetadata() Metadata { - md := Metadata{} - for k, v := range bgpr.rawResponse.Header { - if len(k) > mdPrefixLen { - if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { - md[strings.ToLower(k[mdPrefixLen:])] = v[0] - } - } - } - return md -} - -// Response returns the raw HTTP response object. -func (bgpr BlobGetPropertiesResponse) Response() *http.Response { - return bgpr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bgpr BlobGetPropertiesResponse) StatusCode() int { - return bgpr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bgpr BlobGetPropertiesResponse) Status() string { - return bgpr.rawResponse.Status -} - -// AcceptRanges returns the value for header Accept-Ranges. -func (bgpr BlobGetPropertiesResponse) AcceptRanges() string { - return bgpr.rawResponse.Header.Get("Accept-Ranges") -} - -// AccessTier returns the value for header x-ms-access-tier. -func (bgpr BlobGetPropertiesResponse) AccessTier() string { - return bgpr.rawResponse.Header.Get("x-ms-access-tier") -} - -// AccessTierChangeTime returns the value for header x-ms-access-tier-change-time. -func (bgpr BlobGetPropertiesResponse) AccessTierChangeTime() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-access-tier-change-time") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// AccessTierInferred returns the value for header x-ms-access-tier-inferred. -func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string { - return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred") -} - -// ArchiveStatus returns the value for header x-ms-archive-status. -func (bgpr BlobGetPropertiesResponse) ArchiveStatus() string { - return bgpr.rawResponse.Header.Get("x-ms-archive-status") -} - -// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. -func (bgpr BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 { - s := bgpr.rawResponse.Header.Get("x-ms-blob-committed-block-count") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. -func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { - s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - i = 0 - } - return i -} - -// BlobType returns the value for header x-ms-blob-type. -func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { - return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) -} - -// CacheControl returns the value for header Cache-Control. 
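// NewMetadata above recovers user-defined metadata by stripping the service's
// metadata header prefix (mdPrefix / mdPrefixLen are package constants defined
// elsewhere in the removed file; by Azure REST convention the prefix is
// "x-ms-meta-", which this sketch assumes) and lower-casing the remainder:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

const mdPrefix = "x-ms-meta-" // assumed value of the package constant

func userMetadata(h http.Header) map[string]string {
	md := map[string]string{}
	for k, v := range h {
		// Case-insensitive prefix match, because net/http canonicalizes
		// keys to forms like "X-Ms-Meta-Owner".
		if len(k) > len(mdPrefix) && strings.EqualFold(k[:len(mdPrefix)], mdPrefix) {
			md[strings.ToLower(k[len(mdPrefix):])] = v[0]
		}
	}
	return md
}

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-Owner", "team-a") // stored canonicalized as X-Ms-Meta-Owner
	fmt.Println(userMetadata(h))       // map[owner:team-a]
}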
-func (bgpr BlobGetPropertiesResponse) CacheControl() string { - return bgpr.rawResponse.Header.Get("Cache-Control") -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bgpr BlobGetPropertiesResponse) ClientRequestID() string { - return bgpr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentDisposition returns the value for header Content-Disposition. -func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { - return bgpr.rawResponse.Header.Get("Content-Disposition") -} - -// ContentEncoding returns the value for header Content-Encoding. -func (bgpr BlobGetPropertiesResponse) ContentEncoding() string { - return bgpr.rawResponse.Header.Get("Content-Encoding") -} - -// ContentLanguage returns the value for header Content-Language. -func (bgpr BlobGetPropertiesResponse) ContentLanguage() string { - return bgpr.rawResponse.Header.Get("Content-Language") -} - -// ContentLength returns the value for header Content-Length. -func (bgpr BlobGetPropertiesResponse) ContentLength() int64 { - s := bgpr.rawResponse.Header.Get("Content-Length") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - i = 0 - } - return i -} - -// ContentMD5 returns the value for header Content-MD5. -func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte { - s := bgpr.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// ContentType returns the value for header Content-Type. -func (bgpr BlobGetPropertiesResponse) ContentType() string { - return bgpr.rawResponse.Header.Get("Content-Type") -} - -// CopyCompletionTime returns the value for header x-ms-copy-completion-time. -func (bgpr BlobGetPropertiesResponse) CopyCompletionTime() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-copy-completion-time") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// CopyID returns the value for header x-ms-copy-id. -func (bgpr BlobGetPropertiesResponse) CopyID() string { - return bgpr.rawResponse.Header.Get("x-ms-copy-id") -} - -// CopyProgress returns the value for header x-ms-copy-progress. -func (bgpr BlobGetPropertiesResponse) CopyProgress() string { - return bgpr.rawResponse.Header.Get("x-ms-copy-progress") -} - -// CopySource returns the value for header x-ms-copy-source. -func (bgpr BlobGetPropertiesResponse) CopySource() string { - return bgpr.rawResponse.Header.Get("x-ms-copy-source") -} - -// CopyStatus returns the value for header x-ms-copy-status. -func (bgpr BlobGetPropertiesResponse) CopyStatus() CopyStatusType { - return CopyStatusType(bgpr.rawResponse.Header.Get("x-ms-copy-status")) -} - -// CopyStatusDescription returns the value for header x-ms-copy-status-description. -func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string { - return bgpr.rawResponse.Header.Get("x-ms-copy-status-description") -} - -// CreationTime returns the value for header x-ms-creation-time. -func (bgpr BlobGetPropertiesResponse) CreationTime() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-creation-time") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// Date returns the value for header Date. 
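// CopyID, CopyStatus and CopyProgress above are the getters a caller polls
// after starting an asynchronous copy. A sketch of that polling loop against
// a minimal hand-rolled interface (the real client plumbing is elided, and
// the lowercase status strings are assumed from the service's conventional
// CopyStatusType values):

package main

import (
	"fmt"
	"time"
)

type copyProps interface {
	CopyStatus() string   // the generated getter returns CopyStatusType, a string type
	CopyProgress() string // "bytesCopied/bytesTotal"
}

// waitForCopy re-fetches properties until the copy leaves "pending".
func waitForCopy(fetch func() copyProps, interval time.Duration) string {
	for {
		p := fetch()
		if s := p.CopyStatus(); s != "pending" {
			return s // "success", "aborted" or "failed"
		}
		fmt.Println("copy progress:", p.CopyProgress())
		time.Sleep(interval)
	}
}

type fakeProps struct{ status, progress string }

func (f fakeProps) CopyStatus() string   { return f.status }
func (f fakeProps) CopyProgress() string { return f.progress }

func main() {
	n := 2
	final := waitForCopy(func() copyProps {
		n--
		if n > 0 {
			return fakeProps{"pending", "512/1024"}
		}
		return fakeProps{"success", "1024/1024"}
	}, 10*time.Millisecond)
	fmt.Println("final status:", final)
}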
-func (bgpr BlobGetPropertiesResponse) Date() time.Time { - s := bgpr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot. -func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { - return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string { - return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bgpr BlobGetPropertiesResponse) EncryptionScope() string { - return bgpr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bgpr BlobGetPropertiesResponse) ErrorCode() string { - return bgpr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bgpr BlobGetPropertiesResponse) ETag() ETag { - return ETag(bgpr.rawResponse.Header.Get("ETag")) -} - -// ExpiresOn returns the value for header x-ms-expiry-time. -func (bgpr BlobGetPropertiesResponse) ExpiresOn() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-expiry-time") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date. -func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyExpiresOn() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-immutability-policy-until-date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode. -func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType { - return BlobImmutabilityPolicyModeType(bgpr.rawResponse.Header.Get("x-ms-immutability-policy-mode")) -} - -// IsCurrentVersion returns the value for header x-ms-is-current-version. -func (bgpr BlobGetPropertiesResponse) IsCurrentVersion() string { - return bgpr.rawResponse.Header.Get("x-ms-is-current-version") -} - -// IsIncrementalCopy returns the value for header x-ms-incremental-copy. -func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { - return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") -} - -// IsSealed returns the value for header x-ms-blob-sealed. -func (bgpr BlobGetPropertiesResponse) IsSealed() string { - return bgpr.rawResponse.Header.Get("x-ms-blob-sealed") -} - -// IsServerEncrypted returns the value for header x-ms-server-encrypted. -func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { - return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") -} - -// LastAccessed returns the value for header x-ms-last-access-time. -func (bgpr BlobGetPropertiesResponse) LastAccessed() time.Time { - s := bgpr.rawResponse.Header.Get("x-ms-last-access-time") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LastModified returns the value for header Last-Modified. 
-func (bgpr BlobGetPropertiesResponse) LastModified() time.Time { - s := bgpr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseDuration returns the value for header x-ms-lease-duration. -func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType { - return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration")) -} - -// LeaseState returns the value for header x-ms-lease-state. -func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType { - return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state")) -} - -// LeaseStatus returns the value for header x-ms-lease-status. -func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType { - return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status")) -} - -// LegalHold returns the value for header x-ms-legal-hold. -func (bgpr BlobGetPropertiesResponse) LegalHold() string { - return bgpr.rawResponse.Header.Get("x-ms-legal-hold") -} - -// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id. -func (bgpr BlobGetPropertiesResponse) ObjectReplicationPolicyID() string { - return bgpr.rawResponse.Header.Get("x-ms-or-policy-id") -} - -// ObjectReplicationRules returns the value for header x-ms-or. -func (bgpr BlobGetPropertiesResponse) ObjectReplicationRules() string { - return bgpr.rawResponse.Header.Get("x-ms-or") -} - -// RehydratePriority returns the value for header x-ms-rehydrate-priority. -func (bgpr BlobGetPropertiesResponse) RehydratePriority() string { - return bgpr.rawResponse.Header.Get("x-ms-rehydrate-priority") -} - -// RequestID returns the value for header x-ms-request-id. -func (bgpr BlobGetPropertiesResponse) RequestID() string { - return bgpr.rawResponse.Header.Get("x-ms-request-id") -} - -// TagCount returns the value for header x-ms-tag-count. -func (bgpr BlobGetPropertiesResponse) TagCount() int64 { - s := bgpr.rawResponse.Header.Get("x-ms-tag-count") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - i = 0 - } - return i -} - -// Version returns the value for header x-ms-version. -func (bgpr BlobGetPropertiesResponse) Version() string { - return bgpr.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bgpr BlobGetPropertiesResponse) VersionID() string { - return bgpr.rawResponse.Header.Get("x-ms-version-id") -} - -// BlobHierarchyListSegment ... -type BlobHierarchyListSegment struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Blobs"` - BlobPrefixes []BlobPrefix `xml:"BlobPrefix"` - BlobItems []BlobItemInternal `xml:"Blob"` -} - -// BlobItemInternal - An Azure Storage blob -type BlobItemInternal struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Blob"` - Name string `xml:"Name"` - Deleted bool `xml:"Deleted"` - Snapshot string `xml:"Snapshot"` - VersionID *string `xml:"VersionId"` - IsCurrentVersion *bool `xml:"IsCurrentVersion"` - Properties BlobPropertiesInternal `xml:"Properties"` - Metadata Metadata `xml:"Metadata"` - BlobTags *BlobTags `xml:"Tags"` - ObjectReplicationMetadata map[string]string `xml:"ObjectReplicationMetadata"` - HasVersionsOnly *bool `xml:"HasVersionsOnly"` -} - -// BlobPrefix ... 
-type BlobPrefix struct { - Name string `xml:"Name"` -} - -// BlobPropertiesInternal - Properties of a blob -type BlobPropertiesInternal struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Properties"` - CreationTime *time.Time `xml:"Creation-Time"` - LastModified time.Time `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - // ContentLength - Size in bytes - ContentLength *int64 `xml:"Content-Length"` - ContentType *string `xml:"Content-Type"` - ContentEncoding *string `xml:"Content-Encoding"` - ContentLanguage *string `xml:"Content-Language"` - ContentMD5 []byte `xml:"Content-MD5"` - ContentDisposition *string `xml:"Content-Disposition"` - CacheControl *string `xml:"Cache-Control"` - BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` - // BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone' - BlobType BlobType `xml:"BlobType"` - // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' - LeaseState LeaseStateType `xml:"LeaseState"` - // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - CopyID *string `xml:"CopyId"` - // CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone' - CopyStatus CopyStatusType `xml:"CopyStatus"` - CopySource *string `xml:"CopySource"` - CopyProgress *string `xml:"CopyProgress"` - CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` - CopyStatusDescription *string `xml:"CopyStatusDescription"` - ServerEncrypted *bool `xml:"ServerEncrypted"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - DestinationSnapshot *string `xml:"DestinationSnapshot"` - DeletedTime *time.Time `xml:"DeletedTime"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - // AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone' - AccessTier AccessTierType `xml:"AccessTier"` - AccessTierInferred *bool `xml:"AccessTierInferred"` - // ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone' - ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` - CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` - // EncryptionScope - The name of the encryption scope under which the blob is encrypted. 
- EncryptionScope *string `xml:"EncryptionScope"` - AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` - TagCount *int32 `xml:"TagCount"` - ExpiresOn *time.Time `xml:"Expiry-Time"` - IsSealed *bool `xml:"Sealed"` - // RehydratePriority - Possible values include: 'RehydratePriorityHigh', 'RehydratePriorityStandard', 'RehydratePriorityNone' - RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` - LastAccessedOn *time.Time `xml:"LastAccessTime"` - ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` - // ImmutabilityPolicyMode - Possible values include: 'BlobImmutabilityPolicyModeMutable', 'BlobImmutabilityPolicyModeUnlocked', 'BlobImmutabilityPolicyModeLocked', 'BlobImmutabilityPolicyModeNone' - ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"` - LegalHold *bool `xml:"LegalHold"` - Owner *string `xml:"Owner"` - Group *string `xml:"Group"` - Permissions *string `xml:"Permissions"` - ACL *string `xml:"Acl"` -} - -// MarshalXML implements the xml.Marshaler interface for BlobPropertiesInternal. -func (bpi BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(&bpi)) - return e.EncodeElement(*bpi2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for BlobPropertiesInternal. -func (bpi *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(bpi)) - return d.DecodeElement(bpi2, &start) -} - -// BlobReleaseLeaseResponse ... -type BlobReleaseLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (brlr BlobReleaseLeaseResponse) Response() *http.Response { - return brlr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (brlr BlobReleaseLeaseResponse) StatusCode() int { - return brlr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (brlr BlobReleaseLeaseResponse) Status() string { - return brlr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (brlr BlobReleaseLeaseResponse) ClientRequestID() string { - return brlr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (brlr BlobReleaseLeaseResponse) Date() time.Time { - s := brlr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (brlr BlobReleaseLeaseResponse) ErrorCode() string { - return brlr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (brlr BlobReleaseLeaseResponse) ETag() ETag { - return ETag(brlr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (brlr BlobReleaseLeaseResponse) LastModified() time.Time { - s := brlr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (brlr BlobReleaseLeaseResponse) RequestID() string { - return brlr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
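// MarshalXML/UnmarshalXML above (and the AccessPolicy pair earlier) use one
// generated-code trick: the exported struct is reinterpreted, via
// unsafe.Pointer, as an unexported shadow struct with an identical memory
// layout but different XML tags and no custom marshaler. That both avoids
// infinite recursion into MarshalXML and lets the shadow carry alternate
// field handling. A self-contained sketch of the pattern (Policy/policy are
// illustrative stand-ins, not types from the removed file):

package main

import (
	"encoding/xml"
	"fmt"
	"unsafe"
)

// Exported type with a custom XML marshaler.
type Policy struct {
	Permission *string
}

// Unexported shadow: same field order and types, its own tags, and no
// MarshalXML method, so encoding it cannot recurse back into Policy's.
type policy struct {
	Permission *string `xml:"Permission"`
}

func (p Policy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	p2 := (*policy)(unsafe.Pointer(&p)) // valid only while the layouts stay identical
	return e.EncodeElement(*p2, start)
}

func main() {
	perm := "rw"
	out, _ := xml.Marshal(Policy{Permission: &perm})
	fmt.Println(string(out)) // <Policy><Permission>rw</Permission></Policy>
}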
-func (brlr BlobReleaseLeaseResponse) Version() string { - return brlr.rawResponse.Header.Get("x-ms-version") -} - -// BlobRenewLeaseResponse ... -type BlobRenewLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (brlr BlobRenewLeaseResponse) Response() *http.Response { - return brlr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (brlr BlobRenewLeaseResponse) StatusCode() int { - return brlr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (brlr BlobRenewLeaseResponse) Status() string { - return brlr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (brlr BlobRenewLeaseResponse) ClientRequestID() string { - return brlr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (brlr BlobRenewLeaseResponse) Date() time.Time { - s := brlr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (brlr BlobRenewLeaseResponse) ErrorCode() string { - return brlr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (brlr BlobRenewLeaseResponse) ETag() ETag { - return ETag(brlr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (brlr BlobRenewLeaseResponse) LastModified() time.Time { - s := brlr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (brlr BlobRenewLeaseResponse) LeaseID() string { - return brlr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (brlr BlobRenewLeaseResponse) RequestID() string { - return brlr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (brlr BlobRenewLeaseResponse) Version() string { - return brlr.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetExpiryResponse ... -type BlobSetExpiryResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bser BlobSetExpiryResponse) Response() *http.Response { - return bser.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bser BlobSetExpiryResponse) StatusCode() int { - return bser.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bser BlobSetExpiryResponse) Status() string { - return bser.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bser BlobSetExpiryResponse) ClientRequestID() string { - return bser.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bser BlobSetExpiryResponse) Date() time.Time { - s := bser.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. 
-func (bser BlobSetExpiryResponse) ErrorCode() string { - return bser.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bser BlobSetExpiryResponse) ETag() ETag { - return ETag(bser.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bser BlobSetExpiryResponse) LastModified() time.Time { - s := bser.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bser BlobSetExpiryResponse) RequestID() string { - return bser.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bser BlobSetExpiryResponse) Version() string { - return bser.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetHTTPHeadersResponse ... -type BlobSetHTTPHeadersResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response { - return bshhr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int { - return bshhr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bshhr BlobSetHTTPHeadersResponse) Status() string { - return bshhr.rawResponse.Status -} - -// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. -func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { - s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - i = 0 - } - return i -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string { - return bshhr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { - s := bshhr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string { - return bshhr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag { - return ETag(bshhr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time { - s := bshhr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bshhr BlobSetHTTPHeadersResponse) RequestID() string { - return bshhr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bshhr BlobSetHTTPHeadersResponse) Version() string { - return bshhr.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetImmutabilityPolicyResponse ... -type BlobSetImmutabilityPolicyResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. 
-func (bsipr BlobSetImmutabilityPolicyResponse) Response() *http.Response { - return bsipr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bsipr BlobSetImmutabilityPolicyResponse) StatusCode() int { - return bsipr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bsipr BlobSetImmutabilityPolicyResponse) Status() string { - return bsipr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bsipr BlobSetImmutabilityPolicyResponse) ClientRequestID() string { - return bsipr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bsipr BlobSetImmutabilityPolicyResponse) Date() time.Time { - s := bsipr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bsipr BlobSetImmutabilityPolicyResponse) ErrorCode() string { - return bsipr.rawResponse.Header.Get("x-ms-error-code") -} - -// ImmutabilityPolicyExpiry returns the value for header x-ms-immutability-policy-until-date. -func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyExpiry() time.Time { - s := bsipr.rawResponse.Header.Get("x-ms-immutability-policy-until-date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode. -func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType { - return BlobImmutabilityPolicyModeType(bsipr.rawResponse.Header.Get("x-ms-immutability-policy-mode")) -} - -// RequestID returns the value for header x-ms-request-id. -func (bsipr BlobSetImmutabilityPolicyResponse) RequestID() string { - return bsipr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bsipr BlobSetImmutabilityPolicyResponse) Version() string { - return bsipr.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetLegalHoldResponse ... -type BlobSetLegalHoldResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bslhr BlobSetLegalHoldResponse) Response() *http.Response { - return bslhr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bslhr BlobSetLegalHoldResponse) StatusCode() int { - return bslhr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bslhr BlobSetLegalHoldResponse) Status() string { - return bslhr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bslhr BlobSetLegalHoldResponse) ClientRequestID() string { - return bslhr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bslhr BlobSetLegalHoldResponse) Date() time.Time { - s := bslhr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. 
-func (bslhr BlobSetLegalHoldResponse) ErrorCode() string { - return bslhr.rawResponse.Header.Get("x-ms-error-code") -} - -// LegalHold returns the value for header x-ms-legal-hold. -func (bslhr BlobSetLegalHoldResponse) LegalHold() string { - return bslhr.rawResponse.Header.Get("x-ms-legal-hold") -} - -// RequestID returns the value for header x-ms-request-id. -func (bslhr BlobSetLegalHoldResponse) RequestID() string { - return bslhr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bslhr BlobSetLegalHoldResponse) Version() string { - return bslhr.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetMetadataResponse ... -type BlobSetMetadataResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bsmr BlobSetMetadataResponse) Response() *http.Response { - return bsmr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bsmr BlobSetMetadataResponse) StatusCode() int { - return bsmr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bsmr BlobSetMetadataResponse) Status() string { - return bsmr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bsmr BlobSetMetadataResponse) ClientRequestID() string { - return bsmr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bsmr BlobSetMetadataResponse) Date() time.Time { - s := bsmr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string { - return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bsmr BlobSetMetadataResponse) EncryptionScope() string { - return bsmr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bsmr BlobSetMetadataResponse) ErrorCode() string { - return bsmr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bsmr BlobSetMetadataResponse) ETag() ETag { - return ETag(bsmr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string { - return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (bsmr BlobSetMetadataResponse) LastModified() time.Time { - s := bsmr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bsmr BlobSetMetadataResponse) RequestID() string { - return bsmr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bsmr BlobSetMetadataResponse) Version() string { - return bsmr.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. 
-func (bsmr BlobSetMetadataResponse) VersionID() string { - return bsmr.rawResponse.Header.Get("x-ms-version-id") -} - -// BlobSetTagsResponse ... -type BlobSetTagsResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bstr BlobSetTagsResponse) Response() *http.Response { - return bstr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bstr BlobSetTagsResponse) StatusCode() int { - return bstr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bstr BlobSetTagsResponse) Status() string { - return bstr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bstr BlobSetTagsResponse) ClientRequestID() string { - return bstr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bstr BlobSetTagsResponse) Date() time.Time { - s := bstr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bstr BlobSetTagsResponse) ErrorCode() string { - return bstr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bstr BlobSetTagsResponse) RequestID() string { - return bstr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bstr BlobSetTagsResponse) Version() string { - return bstr.rawResponse.Header.Get("x-ms-version") -} - -// BlobSetTierResponse ... -type BlobSetTierResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bstr BlobSetTierResponse) Response() *http.Response { - return bstr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bstr BlobSetTierResponse) StatusCode() int { - return bstr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bstr BlobSetTierResponse) Status() string { - return bstr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bstr BlobSetTierResponse) ClientRequestID() string { - return bstr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bstr BlobSetTierResponse) ErrorCode() string { - return bstr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bstr BlobSetTierResponse) RequestID() string { - return bstr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bstr BlobSetTierResponse) Version() string { - return bstr.rawResponse.Header.Get("x-ms-version") -} - -// BlobStartCopyFromURLResponse ... -type BlobStartCopyFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response { - return bscfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bscfur BlobStartCopyFromURLResponse) StatusCode() int { - return bscfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". 
-func (bscfur BlobStartCopyFromURLResponse) Status() string { - return bscfur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string { - return bscfur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// CopyID returns the value for header x-ms-copy-id. -func (bscfur BlobStartCopyFromURLResponse) CopyID() string { - return bscfur.rawResponse.Header.Get("x-ms-copy-id") -} - -// CopyStatus returns the value for header x-ms-copy-status. -func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType { - return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status")) -} - -// Date returns the value for header Date. -func (bscfur BlobStartCopyFromURLResponse) Date() time.Time { - s := bscfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string { - return bscfur.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bscfur BlobStartCopyFromURLResponse) ETag() ETag { - return ETag(bscfur.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time { - s := bscfur.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bscfur BlobStartCopyFromURLResponse) RequestID() string { - return bscfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bscfur BlobStartCopyFromURLResponse) Version() string { - return bscfur.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bscfur BlobStartCopyFromURLResponse) VersionID() string { - return bscfur.rawResponse.Header.Get("x-ms-version-id") -} - -// BlobTag ... -type BlobTag struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Tag"` - Key string `xml:"Key"` - Value string `xml:"Value"` -} - -// BlobTags - Blob tags -type BlobTags struct { - rawResponse *http.Response - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Tags"` - BlobTagSet []BlobTag `xml:"TagSet>Tag"` -} - -// Response returns the raw HTTP response object. -func (bt BlobTags) Response() *http.Response { - return bt.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bt BlobTags) StatusCode() int { - return bt.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bt BlobTags) Status() string { - return bt.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bt BlobTags) ClientRequestID() string { - return bt.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. 
-func (bt BlobTags) Date() time.Time { - s := bt.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bt BlobTags) ErrorCode() string { - return bt.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bt BlobTags) RequestID() string { - return bt.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bt BlobTags) Version() string { - return bt.rawResponse.Header.Get("x-ms-version") -} - -// BlobUndeleteResponse ... -type BlobUndeleteResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bur BlobUndeleteResponse) Response() *http.Response { - return bur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bur BlobUndeleteResponse) StatusCode() int { - return bur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bur BlobUndeleteResponse) Status() string { - return bur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bur BlobUndeleteResponse) ClientRequestID() string { - return bur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (bur BlobUndeleteResponse) Date() time.Time { - s := bur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bur BlobUndeleteResponse) ErrorCode() string { - return bur.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (bur BlobUndeleteResponse) RequestID() string { - return bur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bur BlobUndeleteResponse) Version() string { - return bur.rawResponse.Header.Get("x-ms-version") -} - -// Block - Represents a single block in a block blob. It describes the block's ID and size. -type Block struct { - // Name - The base64 encoded block ID. - Name string `xml:"Name"` - // Size - The block size in bytes. - Size int64 `xml:"Size"` -} - -// BlockBlobCommitBlockListResponse ... -type BlockBlobCommitBlockListResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response { - return bbcblr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int { - return bbcblr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bbcblr BlockBlobCommitBlockListResponse) Status() string { - return bbcblr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string { - return bbcblr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. 
-func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { - s := bbcblr.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { - s := bbcblr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string { - return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bbcblr BlockBlobCommitBlockListResponse) EncryptionScope() string { - return bbcblr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { - return bbcblr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag { - return ETag(bbcblr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string { - return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time { - s := bbcblr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string { - return bbcblr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bbcblr BlockBlobCommitBlockListResponse) Version() string { - return bbcblr.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bbcblr BlockBlobCommitBlockListResponse) VersionID() string { - return bbcblr.rawResponse.Header.Get("x-ms-version-id") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte { - s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// BlockBlobPutBlobFromURLResponse ... -type BlockBlobPutBlobFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bbpbfur BlockBlobPutBlobFromURLResponse) Response() *http.Response { - return bbpbfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bbpbfur BlockBlobPutBlobFromURLResponse) StatusCode() int { - return bbpbfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bbpbfur BlockBlobPutBlobFromURLResponse) Status() string { - return bbpbfur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. 
-func (bbpbfur BlockBlobPutBlobFromURLResponse) ClientRequestID() string { - return bbpbfur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (bbpbfur BlockBlobPutBlobFromURLResponse) ContentMD5() []byte { - s := bbpbfur.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (bbpbfur BlockBlobPutBlobFromURLResponse) Date() time.Time { - s := bbpbfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionKeySha256() string { - return bbpbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionScope() string { - return bbpbfur.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bbpbfur BlockBlobPutBlobFromURLResponse) ErrorCode() string { - return bbpbfur.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bbpbfur BlockBlobPutBlobFromURLResponse) ETag() ETag { - return ETag(bbpbfur.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bbpbfur BlockBlobPutBlobFromURLResponse) IsServerEncrypted() string { - return bbpbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (bbpbfur BlockBlobPutBlobFromURLResponse) LastModified() time.Time { - s := bbpbfur.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bbpbfur BlockBlobPutBlobFromURLResponse) RequestID() string { - return bbpbfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bbpbfur BlockBlobPutBlobFromURLResponse) Version() string { - return bbpbfur.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bbpbfur BlockBlobPutBlobFromURLResponse) VersionID() string { - return bbpbfur.rawResponse.Header.Get("x-ms-version-id") -} - -// BlockBlobStageBlockFromURLResponse ... -type BlockBlobStageBlockFromURLResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response { - return bbsbfur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int { - return bbsbfur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { - return bbsbfur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. 
-func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string { - return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { - s := bbsbfur.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { - s := bbsbfur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string { - return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionScope() string { - return bbsbfur.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { - return bbsbfur.rawResponse.Header.Get("x-ms-error-code") -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string { - return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// RequestID returns the value for header x-ms-request-id. -func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string { - return bbsbfur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { - return bbsbfur.rawResponse.Header.Get("x-ms-version") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte { - s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// BlockBlobStageBlockResponse ... -type BlockBlobStageBlockResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response { - return bbsbr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bbsbr BlockBlobStageBlockResponse) StatusCode() int { - return bbsbr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bbsbr BlockBlobStageBlockResponse) Status() string { - return bbsbr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string { - return bbsbr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { - s := bbsbr.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. 
-func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { - s := bbsbr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string { - return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (bbsbr BlockBlobStageBlockResponse) EncryptionScope() string { - return bbsbr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { - return bbsbr.rawResponse.Header.Get("x-ms-error-code") -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string { - return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// RequestID returns the value for header x-ms-request-id. -func (bbsbr BlockBlobStageBlockResponse) RequestID() string { - return bbsbr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bbsbr BlockBlobStageBlockResponse) Version() string { - return bbsbr.rawResponse.Header.Get("x-ms-version") -} - -// XMsContentCrc64 returns the value for header x-ms-content-crc64. -func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte { - s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// BlockBlobUploadResponse ... -type BlockBlobUploadResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (bbur BlockBlobUploadResponse) Response() *http.Response { - return bbur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bbur BlockBlobUploadResponse) StatusCode() int { - return bbur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bbur BlockBlobUploadResponse) Status() string { - return bbur.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bbur BlockBlobUploadResponse) ClientRequestID() string { - return bbur.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentMD5 returns the value for header Content-MD5. -func (bbur BlockBlobUploadResponse) ContentMD5() []byte { - s := bbur.rawResponse.Header.Get("Content-MD5") - if s == "" { - return nil - } - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b = nil - } - return b -} - -// Date returns the value for header Date. -func (bbur BlockBlobUploadResponse) Date() time.Time { - s := bbur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string { - return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. 
-func (bbur BlockBlobUploadResponse) EncryptionScope() string { - return bbur.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bbur BlockBlobUploadResponse) ErrorCode() string { - return bbur.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bbur BlockBlobUploadResponse) ETag() ETag { - return ETag(bbur.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. -func (bbur BlockBlobUploadResponse) IsServerEncrypted() string { - return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (bbur BlockBlobUploadResponse) LastModified() time.Time { - s := bbur.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bbur BlockBlobUploadResponse) RequestID() string { - return bbur.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bbur BlockBlobUploadResponse) Version() string { - return bbur.rawResponse.Header.Get("x-ms-version") -} - -// VersionID returns the value for header x-ms-version-id. -func (bbur BlockBlobUploadResponse) VersionID() string { - return bbur.rawResponse.Header.Get("x-ms-version-id") -} - -// BlockList ... -type BlockList struct { - rawResponse *http.Response - CommittedBlocks []Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"` -} - -// Response returns the raw HTTP response object. -func (bl BlockList) Response() *http.Response { - return bl.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (bl BlockList) StatusCode() int { - return bl.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (bl BlockList) Status() string { - return bl.rawResponse.Status -} - -// BlobContentLength returns the value for header x-ms-blob-content-length. -func (bl BlockList) BlobContentLength() int64 { - s := bl.rawResponse.Header.Get("x-ms-blob-content-length") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - i = 0 - } - return i -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (bl BlockList) ClientRequestID() string { - return bl.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ContentType returns the value for header Content-Type. -func (bl BlockList) ContentType() string { - return bl.rawResponse.Header.Get("Content-Type") -} - -// Date returns the value for header Date. -func (bl BlockList) Date() time.Time { - s := bl.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (bl BlockList) ErrorCode() string { - return bl.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (bl BlockList) ETag() ETag { - return ETag(bl.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. 
-func (bl BlockList) LastModified() time.Time { - s := bl.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (bl BlockList) RequestID() string { - return bl.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (bl BlockList) Version() string { - return bl.rawResponse.Header.Get("x-ms-version") -} - -// BlockLookupList ... -type BlockLookupList struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"BlockList"` - Committed []string `xml:"Committed"` - Uncommitted []string `xml:"Uncommitted"` - Latest []string `xml:"Latest"` -} - -// ClearRange ... -type ClearRange struct { - Start int64 `xml:"Start"` - End int64 `xml:"End"` -} - -// ContainerAcquireLeaseResponse ... -type ContainerAcquireLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (calr ContainerAcquireLeaseResponse) Response() *http.Response { - return calr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (calr ContainerAcquireLeaseResponse) StatusCode() int { - return calr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (calr ContainerAcquireLeaseResponse) Status() string { - return calr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (calr ContainerAcquireLeaseResponse) ClientRequestID() string { - return calr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (calr ContainerAcquireLeaseResponse) Date() time.Time { - s := calr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (calr ContainerAcquireLeaseResponse) ErrorCode() string { - return calr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (calr ContainerAcquireLeaseResponse) ETag() ETag { - return ETag(calr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (calr ContainerAcquireLeaseResponse) LastModified() time.Time { - s := calr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (calr ContainerAcquireLeaseResponse) LeaseID() string { - return calr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (calr ContainerAcquireLeaseResponse) RequestID() string { - return calr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (calr ContainerAcquireLeaseResponse) Version() string { - return calr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerBreakLeaseResponse ... -type ContainerBreakLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. 
-func (cblr ContainerBreakLeaseResponse) Response() *http.Response { - return cblr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (cblr ContainerBreakLeaseResponse) StatusCode() int { - return cblr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (cblr ContainerBreakLeaseResponse) Status() string { - return cblr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (cblr ContainerBreakLeaseResponse) ClientRequestID() string { - return cblr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (cblr ContainerBreakLeaseResponse) Date() time.Time { - s := cblr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (cblr ContainerBreakLeaseResponse) ErrorCode() string { - return cblr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (cblr ContainerBreakLeaseResponse) ETag() ETag { - return ETag(cblr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { - s := cblr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseTime returns the value for header x-ms-lease-time. -func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { - s := cblr.rawResponse.Header.Get("x-ms-lease-time") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// RequestID returns the value for header x-ms-request-id. -func (cblr ContainerBreakLeaseResponse) RequestID() string { - return cblr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (cblr ContainerBreakLeaseResponse) Version() string { - return cblr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerChangeLeaseResponse ... -type ContainerChangeLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (cclr ContainerChangeLeaseResponse) Response() *http.Response { - return cclr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (cclr ContainerChangeLeaseResponse) StatusCode() int { - return cclr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (cclr ContainerChangeLeaseResponse) Status() string { - return cclr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (cclr ContainerChangeLeaseResponse) ClientRequestID() string { - return cclr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (cclr ContainerChangeLeaseResponse) Date() time.Time { - s := cclr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. 
-func (cclr ContainerChangeLeaseResponse) ErrorCode() string { - return cclr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (cclr ContainerChangeLeaseResponse) ETag() ETag { - return ETag(cclr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (cclr ContainerChangeLeaseResponse) LastModified() time.Time { - s := cclr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (cclr ContainerChangeLeaseResponse) LeaseID() string { - return cclr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (cclr ContainerChangeLeaseResponse) RequestID() string { - return cclr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (cclr ContainerChangeLeaseResponse) Version() string { - return cclr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerCreateResponse ... -type ContainerCreateResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (ccr ContainerCreateResponse) Response() *http.Response { - return ccr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (ccr ContainerCreateResponse) StatusCode() int { - return ccr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (ccr ContainerCreateResponse) Status() string { - return ccr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (ccr ContainerCreateResponse) ClientRequestID() string { - return ccr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (ccr ContainerCreateResponse) Date() time.Time { - s := ccr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (ccr ContainerCreateResponse) ErrorCode() string { - return ccr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (ccr ContainerCreateResponse) ETag() ETag { - return ETag(ccr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (ccr ContainerCreateResponse) LastModified() time.Time { - s := ccr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (ccr ContainerCreateResponse) RequestID() string { - return ccr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (ccr ContainerCreateResponse) Version() string { - return ccr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerDeleteResponse ... -type ContainerDeleteResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (cdr ContainerDeleteResponse) Response() *http.Response { - return cdr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. 
-func (cdr ContainerDeleteResponse) StatusCode() int { - return cdr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (cdr ContainerDeleteResponse) Status() string { - return cdr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (cdr ContainerDeleteResponse) ClientRequestID() string { - return cdr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (cdr ContainerDeleteResponse) Date() time.Time { - s := cdr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (cdr ContainerDeleteResponse) ErrorCode() string { - return cdr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (cdr ContainerDeleteResponse) RequestID() string { - return cdr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (cdr ContainerDeleteResponse) Version() string { - return cdr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerGetAccountInfoResponse ... -type ContainerGetAccountInfoResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (cgair ContainerGetAccountInfoResponse) Response() *http.Response { - return cgair.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (cgair ContainerGetAccountInfoResponse) StatusCode() int { - return cgair.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (cgair ContainerGetAccountInfoResponse) Status() string { - return cgair.rawResponse.Status -} - -// AccountKind returns the value for header x-ms-account-kind. -func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType { - return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind")) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (cgair ContainerGetAccountInfoResponse) ClientRequestID() string { - return cgair.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (cgair ContainerGetAccountInfoResponse) Date() time.Time { - s := cgair.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (cgair ContainerGetAccountInfoResponse) ErrorCode() string { - return cgair.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (cgair ContainerGetAccountInfoResponse) RequestID() string { - return cgair.rawResponse.Header.Get("x-ms-request-id") -} - -// SkuName returns the value for header x-ms-sku-name. -func (cgair ContainerGetAccountInfoResponse) SkuName() SkuNameType { - return SkuNameType(cgair.rawResponse.Header.Get("x-ms-sku-name")) -} - -// Version returns the value for header x-ms-version. -func (cgair ContainerGetAccountInfoResponse) Version() string { - return cgair.rawResponse.Header.Get("x-ms-version") -} - -// ContainerGetPropertiesResponse ... -type ContainerGetPropertiesResponse struct { - rawResponse *http.Response -} - -// NewMetadata returns user-defined key/value pairs. 
-func (cgpr ContainerGetPropertiesResponse) NewMetadata() Metadata { - md := Metadata{} - for k, v := range cgpr.rawResponse.Header { - if len(k) > mdPrefixLen { - if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { - md[strings.ToLower(k[mdPrefixLen:])] = v[0] - } - } - } - return md -} - -// Response returns the raw HTTP response object. -func (cgpr ContainerGetPropertiesResponse) Response() *http.Response { - return cgpr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (cgpr ContainerGetPropertiesResponse) StatusCode() int { - return cgpr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (cgpr ContainerGetPropertiesResponse) Status() string { - return cgpr.rawResponse.Status -} - -// BlobPublicAccess returns the value for header x-ms-blob-public-access. -func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { - return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (cgpr ContainerGetPropertiesResponse) ClientRequestID() string { - return cgpr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (cgpr ContainerGetPropertiesResponse) Date() time.Time { - s := cgpr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// DefaultEncryptionScope returns the value for header x-ms-default-encryption-scope. -func (cgpr ContainerGetPropertiesResponse) DefaultEncryptionScope() string { - return cgpr.rawResponse.Header.Get("x-ms-default-encryption-scope") -} - -// DenyEncryptionScopeOverride returns the value for header x-ms-deny-encryption-scope-override. -func (cgpr ContainerGetPropertiesResponse) DenyEncryptionScopeOverride() string { - return cgpr.rawResponse.Header.Get("x-ms-deny-encryption-scope-override") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { - return cgpr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (cgpr ContainerGetPropertiesResponse) ETag() ETag { - return ETag(cgpr.rawResponse.Header.Get("ETag")) -} - -// HasImmutabilityPolicy returns the value for header x-ms-has-immutability-policy. -func (cgpr ContainerGetPropertiesResponse) HasImmutabilityPolicy() string { - return cgpr.rawResponse.Header.Get("x-ms-has-immutability-policy") -} - -// HasLegalHold returns the value for header x-ms-has-legal-hold. -func (cgpr ContainerGetPropertiesResponse) HasLegalHold() string { - return cgpr.rawResponse.Header.Get("x-ms-has-legal-hold") -} - -// IsImmutableStorageWithVersioningEnabled returns the value for header x-ms-immutable-storage-with-versioning-enabled. -func (cgpr ContainerGetPropertiesResponse) IsImmutableStorageWithVersioningEnabled() string { - return cgpr.rawResponse.Header.Get("x-ms-immutable-storage-with-versioning-enabled") -} - -// LastModified returns the value for header Last-Modified. -func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { - s := cgpr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseDuration returns the value for header x-ms-lease-duration. 
-func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType { - return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration")) -} - -// LeaseState returns the value for header x-ms-lease-state. -func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType { - return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state")) -} - -// LeaseStatus returns the value for header x-ms-lease-status. -func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType { - return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status")) -} - -// RequestID returns the value for header x-ms-request-id. -func (cgpr ContainerGetPropertiesResponse) RequestID() string { - return cgpr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (cgpr ContainerGetPropertiesResponse) Version() string { - return cgpr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerItem - An Azure Storage container -type ContainerItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Container"` - Name string `xml:"Name"` - Deleted *bool `xml:"Deleted"` - Version *string `xml:"Version"` - Properties ContainerProperties `xml:"Properties"` - Metadata Metadata `xml:"Metadata"` -} - -// ContainerProperties - Properties of a container -type ContainerProperties struct { - LastModified time.Time `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' - LeaseState LeaseStateType `xml:"LeaseState"` - // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' - PublicAccess PublicAccessType `xml:"PublicAccess"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` - DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` - PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` - DeletedTime *time.Time `xml:"DeletedTime"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - // IsImmutableStorageWithVersioningEnabled - Indicates if version level worm is enabled on this container. - IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` -} - -// MarshalXML implements the xml.Marshaler interface for ContainerProperties. -func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - cp2 := (*containerProperties)(unsafe.Pointer(&cp)) - return e.EncodeElement(*cp2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties. -func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - cp2 := (*containerProperties)(unsafe.Pointer(cp)) - return d.DecodeElement(cp2, &start) -} - -// ContainerReleaseLeaseResponse ... -type ContainerReleaseLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. 
-func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { - return crlr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (crlr ContainerReleaseLeaseResponse) StatusCode() int { - return crlr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (crlr ContainerReleaseLeaseResponse) Status() string { - return crlr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (crlr ContainerReleaseLeaseResponse) ClientRequestID() string { - return crlr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (crlr ContainerReleaseLeaseResponse) Date() time.Time { - s := crlr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (crlr ContainerReleaseLeaseResponse) ErrorCode() string { - return crlr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (crlr ContainerReleaseLeaseResponse) ETag() ETag { - return ETag(crlr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time { - s := crlr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (crlr ContainerReleaseLeaseResponse) RequestID() string { - return crlr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (crlr ContainerReleaseLeaseResponse) Version() string { - return crlr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerRenameResponse ... -type ContainerRenameResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (crr ContainerRenameResponse) Response() *http.Response { - return crr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (crr ContainerRenameResponse) StatusCode() int { - return crr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (crr ContainerRenameResponse) Status() string { - return crr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (crr ContainerRenameResponse) ClientRequestID() string { - return crr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (crr ContainerRenameResponse) Date() time.Time { - s := crr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (crr ContainerRenameResponse) ErrorCode() string { - return crr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (crr ContainerRenameResponse) RequestID() string { - return crr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
-func (crr ContainerRenameResponse) Version() string { - return crr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerRenewLeaseResponse ... -type ContainerRenewLeaseResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (crlr ContainerRenewLeaseResponse) Response() *http.Response { - return crlr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (crlr ContainerRenewLeaseResponse) StatusCode() int { - return crlr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (crlr ContainerRenewLeaseResponse) Status() string { - return crlr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (crlr ContainerRenewLeaseResponse) ClientRequestID() string { - return crlr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (crlr ContainerRenewLeaseResponse) Date() time.Time { - s := crlr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (crlr ContainerRenewLeaseResponse) ErrorCode() string { - return crlr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (crlr ContainerRenewLeaseResponse) ETag() ETag { - return ETag(crlr.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (crlr ContainerRenewLeaseResponse) LastModified() time.Time { - s := crlr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseID returns the value for header x-ms-lease-id. -func (crlr ContainerRenewLeaseResponse) LeaseID() string { - return crlr.rawResponse.Header.Get("x-ms-lease-id") -} - -// RequestID returns the value for header x-ms-request-id. -func (crlr ContainerRenewLeaseResponse) RequestID() string { - return crlr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (crlr ContainerRenewLeaseResponse) Version() string { - return crlr.rawResponse.Header.Get("x-ms-version") -} - -// ContainerRestoreResponse ... -type ContainerRestoreResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (crr ContainerRestoreResponse) Response() *http.Response { - return crr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (crr ContainerRestoreResponse) StatusCode() int { - return crr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (crr ContainerRestoreResponse) Status() string { - return crr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (crr ContainerRestoreResponse) ClientRequestID() string { - return crr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (crr ContainerRestoreResponse) Date() time.Time { - s := crr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. 
-func (crr ContainerRestoreResponse) ErrorCode() string {
-	return crr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (crr ContainerRestoreResponse) RequestID() string {
-	return crr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (crr ContainerRestoreResponse) Version() string {
-	return crr.rawResponse.Header.Get("x-ms-version")
-}
-
-// ContainerSetAccessPolicyResponse ...
-type ContainerSetAccessPolicyResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response {
-	return csapr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (csapr ContainerSetAccessPolicyResponse) StatusCode() int {
-	return csapr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (csapr ContainerSetAccessPolicyResponse) Status() string {
-	return csapr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (csapr ContainerSetAccessPolicyResponse) ClientRequestID() string {
-	return csapr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (csapr ContainerSetAccessPolicyResponse) Date() time.Time {
-	s := csapr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string {
-	return csapr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (csapr ContainerSetAccessPolicyResponse) ETag() ETag {
-	return ETag(csapr.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time {
-	s := csapr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (csapr ContainerSetAccessPolicyResponse) RequestID() string {
-	return csapr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (csapr ContainerSetAccessPolicyResponse) Version() string {
-	return csapr.rawResponse.Header.Get("x-ms-version")
-}
-
-// ContainerSetMetadataResponse ...
-type ContainerSetMetadataResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (csmr ContainerSetMetadataResponse) Response() *http.Response {
-	return csmr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (csmr ContainerSetMetadataResponse) StatusCode() int {
-	return csmr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (csmr ContainerSetMetadataResponse) Status() string {
-	return csmr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (csmr ContainerSetMetadataResponse) ClientRequestID() string {
-	return csmr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (csmr ContainerSetMetadataResponse) Date() time.Time {
-	s := csmr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (csmr ContainerSetMetadataResponse) ErrorCode() string {
-	return csmr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (csmr ContainerSetMetadataResponse) ETag() ETag {
-	return ETag(csmr.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (csmr ContainerSetMetadataResponse) LastModified() time.Time {
-	s := csmr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (csmr ContainerSetMetadataResponse) RequestID() string {
-	return csmr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (csmr ContainerSetMetadataResponse) Version() string {
-	return csmr.rawResponse.Header.Get("x-ms-version")
-}
-
-// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access
-// resources in another domain. Web browsers implement a security restriction known as same-origin policy that
-// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain
-// (the origin domain) to call APIs in another domain
-type CorsRule struct {
-	// AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS.
-	AllowedOrigins string `xml:"AllowedOrigins"`
-	// AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
-	AllowedMethods string `xml:"AllowedMethods"`
-	// AllowedHeaders - the request headers that the origin domain may specify on the CORS request.
-	AllowedHeaders string `xml:"AllowedHeaders"`
-	// ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer
-	ExposedHeaders string `xml:"ExposedHeaders"`
-	// MaxAgeInSeconds - The maximum amount time that a browser should cache the preflight OPTIONS request.
-	MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"`
-}
-
-// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is
-// delimited text formatted.
-type DelimitedTextConfiguration struct {
-	// ColumnSeparator - The string used to separate columns.
-	ColumnSeparator *string `xml:"ColumnSeparator"`
-	// FieldQuote - The string used to quote a specific field.
-	FieldQuote *string `xml:"FieldQuote"`
-	// RecordSeparator - The string used to separate records.
-	RecordSeparator *string `xml:"RecordSeparator"`
-	// EscapeChar - The string used as an escape character.
-	EscapeChar *string `xml:"EscapeChar"`
-	// HeadersPresent - Represents whether the data has headers.
-	HeadersPresent *bool `xml:"HasHeaders"`
-}
-
-// downloadResponse - Wraps the response from the blobClient.Download method.
-type downloadResponse struct {
-	rawResponse *http.Response
-}
-
-// NewMetadata returns user-defined key/value pairs.
-func (dr downloadResponse) NewMetadata() Metadata {
-	md := Metadata{}
-	for k, v := range dr.rawResponse.Header {
-		if len(k) > mdPrefixLen {
-			if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
-				md[strings.ToLower(k[mdPrefixLen:])] = v[0]
-			}
-		}
-	}
-	return md
-}
-
-// Response returns the raw HTTP response object.
-func (dr downloadResponse) Response() *http.Response {
-	return dr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (dr downloadResponse) StatusCode() int {
-	return dr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (dr downloadResponse) Status() string {
-	return dr.rawResponse.Status
-}
-
-// Body returns the raw HTTP response object's Body.
-func (dr downloadResponse) Body() io.ReadCloser {
-	return dr.rawResponse.Body
-}
-
-// AcceptRanges returns the value for header Accept-Ranges.
-func (dr downloadResponse) AcceptRanges() string {
-	return dr.rawResponse.Header.Get("Accept-Ranges")
-}
-
-// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
-func (dr downloadResponse) BlobCommittedBlockCount() int32 {
-	s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 32)
-	if err != nil {
-		i = 0
-	}
-	return int32(i)
-}
-
-// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
-func (dr downloadResponse) BlobContentMD5() []byte {
-	s := dr.rawResponse.Header.Get("x-ms-blob-content-md5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (dr downloadResponse) BlobSequenceNumber() int64 {
-	s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// BlobType returns the value for header x-ms-blob-type.
-func (dr downloadResponse) BlobType() BlobType {
-	return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type"))
-}
-
-// CacheControl returns the value for header Cache-Control.
-func (dr downloadResponse) CacheControl() string {
-	return dr.rawResponse.Header.Get("Cache-Control")
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (dr downloadResponse) ClientRequestID() string {
-	return dr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentCrc64 returns the value for header x-ms-content-crc64.
-func (dr downloadResponse) ContentCrc64() []byte {
-	s := dr.rawResponse.Header.Get("x-ms-content-crc64")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// ContentDisposition returns the value for header Content-Disposition.
-func (dr downloadResponse) ContentDisposition() string {
-	return dr.rawResponse.Header.Get("Content-Disposition")
-}
-
-// ContentEncoding returns the value for header Content-Encoding.
-func (dr downloadResponse) ContentEncoding() string {
-	return dr.rawResponse.Header.Get("Content-Encoding")
-}
-
-// ContentLanguage returns the value for header Content-Language.
-func (dr downloadResponse) ContentLanguage() string {
-	return dr.rawResponse.Header.Get("Content-Language")
-}
-
-// ContentLength returns the value for header Content-Length.
-func (dr downloadResponse) ContentLength() int64 {
-	s := dr.rawResponse.Header.Get("Content-Length")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (dr downloadResponse) ContentMD5() []byte {
-	s := dr.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// ContentRange returns the value for header Content-Range.
-func (dr downloadResponse) ContentRange() string {
-	return dr.rawResponse.Header.Get("Content-Range")
-}
-
-// ContentType returns the value for header Content-Type.
-func (dr downloadResponse) ContentType() string {
-	return dr.rawResponse.Header.Get("Content-Type")
-}
-
-// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
-func (dr downloadResponse) CopyCompletionTime() time.Time {
-	s := dr.rawResponse.Header.Get("x-ms-copy-completion-time")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// CopyID returns the value for header x-ms-copy-id.
-func (dr downloadResponse) CopyID() string {
-	return dr.rawResponse.Header.Get("x-ms-copy-id")
-}
-
-// CopyProgress returns the value for header x-ms-copy-progress.
-func (dr downloadResponse) CopyProgress() string {
-	return dr.rawResponse.Header.Get("x-ms-copy-progress")
-}
-
-// CopySource returns the value for header x-ms-copy-source.
-func (dr downloadResponse) CopySource() string {
-	return dr.rawResponse.Header.Get("x-ms-copy-source")
-}
-
-// CopyStatus returns the value for header x-ms-copy-status.
-func (dr downloadResponse) CopyStatus() CopyStatusType {
-	return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status"))
-}
-
-// CopyStatusDescription returns the value for header x-ms-copy-status-description.
-func (dr downloadResponse) CopyStatusDescription() string {
-	return dr.rawResponse.Header.Get("x-ms-copy-status-description")
-}
-
-// Date returns the value for header Date.
-func (dr downloadResponse) Date() time.Time {
-	s := dr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
-func (dr downloadResponse) EncryptionKeySha256() string {
-	return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
-}
-
-// EncryptionScope returns the value for header x-ms-encryption-scope.
-func (dr downloadResponse) EncryptionScope() string {
-	return dr.rawResponse.Header.Get("x-ms-encryption-scope")
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (dr downloadResponse) ErrorCode() string {
-	return dr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (dr downloadResponse) ETag() ETag {
-	return ETag(dr.rawResponse.Header.Get("ETag"))
-}
-
-// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date.
-func (dr downloadResponse) ImmutabilityPolicyExpiresOn() time.Time {
-	s := dr.rawResponse.Header.Get("x-ms-immutability-policy-until-date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode.
-func (dr downloadResponse) ImmutabilityPolicyMode() string {
-	return string(dr.rawResponse.Header.Get("x-ms-immutability-policy-mode"))
-}
-
-// IsCurrentVersion returns the value for header x-ms-is-current-version.
-func (dr downloadResponse) IsCurrentVersion() string {
-	return dr.rawResponse.Header.Get("x-ms-is-current-version")
-}
-
-// IsSealed returns the value for header x-ms-blob-sealed.
-func (dr downloadResponse) IsSealed() string {
-	return dr.rawResponse.Header.Get("x-ms-blob-sealed")
-}
-
-// IsServerEncrypted returns the value for header x-ms-server-encrypted.
-func (dr downloadResponse) IsServerEncrypted() string {
-	return dr.rawResponse.Header.Get("x-ms-server-encrypted")
-}
-
-// LastAccessed returns the value for header x-ms-last-access-time.
-func (dr downloadResponse) LastAccessed() time.Time {
-	s := dr.rawResponse.Header.Get("x-ms-last-access-time")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// LastModified returns the value for header Last-Modified.
-func (dr downloadResponse) LastModified() time.Time {
-	s := dr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// LeaseDuration returns the value for header x-ms-lease-duration.
-func (dr downloadResponse) LeaseDuration() LeaseDurationType {
-	return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration"))
-}
-
-// LeaseState returns the value for header x-ms-lease-state.
-func (dr downloadResponse) LeaseState() LeaseStateType {
-	return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state"))
-}
-
-// LeaseStatus returns the value for header x-ms-lease-status.
-func (dr downloadResponse) LeaseStatus() LeaseStatusType {
-	return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status"))
-}
-
-// LegalHold returns the value for header x-ms-legal-hold.
-func (dr downloadResponse) LegalHold() string {
-	return dr.rawResponse.Header.Get("x-ms-legal-hold")
-}
-
-// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id.
-func (dr downloadResponse) ObjectReplicationPolicyID() string {
-	return dr.rawResponse.Header.Get("x-ms-or-policy-id")
-}
-
-// ObjectReplicationRules returns the value for header x-ms-or.
-func (dr downloadResponse) ObjectReplicationRules() string {
-	return dr.rawResponse.Header.Get("x-ms-or")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (dr downloadResponse) RequestID() string {
-	return dr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// TagCount returns the value for header x-ms-tag-count.
-func (dr downloadResponse) TagCount() int64 {
-	s := dr.rawResponse.Header.Get("x-ms-tag-count")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// Version returns the value for header x-ms-version.
-func (dr downloadResponse) Version() string {
-	return dr.rawResponse.Header.Get("x-ms-version")
-}
-
-// VersionID returns the value for header x-ms-version-id.
-func (dr downloadResponse) VersionID() string {
-	return dr.rawResponse.Header.Get("x-ms-version-id")
-}
-
-// FilterBlobItem - Blob info from a Filter Blobs API call
-type FilterBlobItem struct {
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"Blob"`
-	Name string `xml:"Name"`
-	ContainerName string `xml:"ContainerName"`
-	Tags *BlobTags `xml:"Tags"`
-}
-
-// FilterBlobSegment - The result of a Filter Blobs API call
-type FilterBlobSegment struct {
-	rawResponse *http.Response
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"EnumerationResults"`
-	ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
-	Where string `xml:"Where"`
-	Blobs []FilterBlobItem `xml:"Blobs>Blob"`
-	NextMarker *string `xml:"NextMarker"`
-}
-
-// Response returns the raw HTTP response object.
-func (fbs FilterBlobSegment) Response() *http.Response {
-	return fbs.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (fbs FilterBlobSegment) StatusCode() int {
-	return fbs.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (fbs FilterBlobSegment) Status() string {
-	return fbs.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (fbs FilterBlobSegment) ClientRequestID() string {
-	return fbs.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (fbs FilterBlobSegment) Date() time.Time {
-	s := fbs.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (fbs FilterBlobSegment) ErrorCode() string {
-	return fbs.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (fbs FilterBlobSegment) RequestID() string {
-	return fbs.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (fbs FilterBlobSegment) Version() string {
-	return fbs.rawResponse.Header.Get("x-ms-version")
-}
-
-// GeoReplication - Geo-Replication information for the Secondary Storage Service
-type GeoReplication struct {
-	// Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone'
-	Status GeoReplicationStatusType `xml:"Status"`
-	// LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads.
-	LastSyncTime time.Time `xml:"LastSyncTime"`
-}
-
-// MarshalXML implements the xml.Marshaler interface for GeoReplication.
-func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
-	gr2 := (*geoReplication)(unsafe.Pointer(&gr))
-	return e.EncodeElement(*gr2, start)
-}
-
-// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication.
-func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
-	gr2 := (*geoReplication)(unsafe.Pointer(gr))
-	return d.DecodeElement(gr2, &start)
-}
-
-// JSONTextConfiguration - json text configuration
-type JSONTextConfiguration struct {
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"JsonTextConfiguration"`
-	// RecordSeparator - The string used to separate records.
-	RecordSeparator *string `xml:"RecordSeparator"`
-}
-
-// KeyInfo - Key information
-type KeyInfo struct {
-	// Start - The date-time the key is active in ISO 8601 UTC time
-	Start string `xml:"Start"`
-	// Expiry - The date-time the key expires in ISO 8601 UTC time
-	Expiry string `xml:"Expiry"`
-}
-
-// ListBlobsFlatSegmentResponse - An enumeration of blobs
-type ListBlobsFlatSegmentResponse struct {
-	rawResponse *http.Response
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"EnumerationResults"`
-	ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
-	ContainerName string `xml:"ContainerName,attr"`
-	Prefix *string `xml:"Prefix"`
-	Marker *string `xml:"Marker"`
-	MaxResults *int32 `xml:"MaxResults"`
-	Segment BlobFlatListSegment `xml:"Blobs"`
-	NextMarker Marker `xml:"NextMarker"`
-}
-
-// Response returns the raw HTTP response object.
-func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response {
-	return lbfsr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int {
-	return lbfsr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (lbfsr ListBlobsFlatSegmentResponse) Status() string {
-	return lbfsr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string {
-	return lbfsr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentType returns the value for header Content-Type.
-func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string {
-	return lbfsr.rawResponse.Header.Get("Content-Type")
-}
-
-// Date returns the value for header Date.
-func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time {
-	s := lbfsr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string {
-	return lbfsr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string {
-	return lbfsr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (lbfsr ListBlobsFlatSegmentResponse) Version() string {
-	return lbfsr.rawResponse.Header.Get("x-ms-version")
-}
-
-// ListBlobsHierarchySegmentResponse - An enumeration of blobs
-type ListBlobsHierarchySegmentResponse struct {
-	rawResponse *http.Response
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"EnumerationResults"`
-	ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
-	ContainerName string `xml:"ContainerName,attr"`
-	Prefix *string `xml:"Prefix"`
-	Marker *string `xml:"Marker"`
-	MaxResults *int32 `xml:"MaxResults"`
-	Delimiter *string `xml:"Delimiter"`
-	Segment BlobHierarchyListSegment `xml:"Blobs"`
-	NextMarker Marker `xml:"NextMarker"`
-}
-
-// Response returns the raw HTTP response object.
-func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response {
-	return lbhsr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int {
-	return lbhsr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (lbhsr ListBlobsHierarchySegmentResponse) Status() string {
-	return lbhsr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string {
-	return lbhsr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentType returns the value for header Content-Type.
-func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string {
-	return lbhsr.rawResponse.Header.Get("Content-Type")
-}
-
-// Date returns the value for header Date.
-func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time {
-	s := lbhsr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string {
-	return lbhsr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string {
-	return lbhsr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (lbhsr ListBlobsHierarchySegmentResponse) Version() string {
-	return lbhsr.rawResponse.Header.Get("x-ms-version")
-}
-
-// ListContainersSegmentResponse - An enumeration of containers
-type ListContainersSegmentResponse struct {
-	rawResponse *http.Response
-	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName xml.Name `xml:"EnumerationResults"`
-	ServiceEndpoint string `xml:"ServiceEndpoint,attr"`
-	Prefix *string `xml:"Prefix"`
-	Marker *string `xml:"Marker"`
-	MaxResults *int32 `xml:"MaxResults"`
-	ContainerItems []ContainerItem `xml:"Containers>Container"`
-	NextMarker Marker `xml:"NextMarker"`
-}
-
-// Response returns the raw HTTP response object.
-func (lcsr ListContainersSegmentResponse) Response() *http.Response {
-	return lcsr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (lcsr ListContainersSegmentResponse) StatusCode() int {
-	return lcsr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (lcsr ListContainersSegmentResponse) Status() string {
-	return lcsr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (lcsr ListContainersSegmentResponse) ClientRequestID() string {
-	return lcsr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (lcsr ListContainersSegmentResponse) ErrorCode() string {
-	return lcsr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (lcsr ListContainersSegmentResponse) RequestID() string {
-	return lcsr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (lcsr ListContainersSegmentResponse) Version() string {
-	return lcsr.rawResponse.Header.Get("x-ms-version")
-}
-
-// Logging - Azure Analytics Logging settings.
-type Logging struct {
-	// Version - The version of Storage Analytics to configure.
-	Version string `xml:"Version"`
-	// Delete - Indicates whether all delete requests should be logged.
-	Delete bool `xml:"Delete"`
-	// Read - Indicates whether all read requests should be logged.
-	Read bool `xml:"Read"`
-	// Write - Indicates whether all write requests should be logged.
-	Write bool `xml:"Write"`
-	RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"`
-}
-
-// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs
-type Metrics struct {
-	// Version - The version of Storage Analytics to configure.
-	Version *string `xml:"Version"`
-	// Enabled - Indicates whether metrics are enabled for the Blob service.
-	Enabled bool `xml:"Enabled"`
-	// IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations.
-	IncludeAPIs *bool `xml:"IncludeAPIs"`
-	RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"`
-}
-
-// PageBlobClearPagesResponse ...
-type PageBlobClearPagesResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbcpr PageBlobClearPagesResponse) Response() *http.Response {
-	return pbcpr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbcpr PageBlobClearPagesResponse) StatusCode() int {
-	return pbcpr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbcpr PageBlobClearPagesResponse) Status() string {
-	return pbcpr.rawResponse.Status
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 {
-	s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string {
-	return pbcpr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte {
-	s := pbcpr.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// Date returns the value for header Date.
-func (pbcpr PageBlobClearPagesResponse) Date() time.Time {
-	s := pbcpr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbcpr PageBlobClearPagesResponse) ErrorCode() string {
-	return pbcpr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbcpr PageBlobClearPagesResponse) ETag() ETag {
-	return ETag(pbcpr.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time {
-	s := pbcpr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbcpr PageBlobClearPagesResponse) RequestID() string {
-	return pbcpr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbcpr PageBlobClearPagesResponse) Version() string {
-	return pbcpr.rawResponse.Header.Get("x-ms-version")
-}
-
-// XMsContentCrc64 returns the value for header x-ms-content-crc64.
-func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte {
-	s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// PageBlobCopyIncrementalResponse ...
-type PageBlobCopyIncrementalResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response {
-	return pbcir.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int {
-	return pbcir.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbcir PageBlobCopyIncrementalResponse) Status() string {
-	return pbcir.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string {
-	return pbcir.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// CopyID returns the value for header x-ms-copy-id.
-func (pbcir PageBlobCopyIncrementalResponse) CopyID() string {
-	return pbcir.rawResponse.Header.Get("x-ms-copy-id")
-}
-
-// CopyStatus returns the value for header x-ms-copy-status.
-func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType {
-	return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status"))
-}
-
-// Date returns the value for header Date.
-func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time {
-	s := pbcir.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string {
-	return pbcir.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag {
-	return ETag(pbcir.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time {
-	s := pbcir.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbcir PageBlobCopyIncrementalResponse) RequestID() string {
-	return pbcir.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbcir PageBlobCopyIncrementalResponse) Version() string {
-	return pbcir.rawResponse.Header.Get("x-ms-version")
-}
-
-// PageBlobCreateResponse ...
-type PageBlobCreateResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbcr PageBlobCreateResponse) Response() *http.Response {
-	return pbcr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbcr PageBlobCreateResponse) StatusCode() int {
-	return pbcr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbcr PageBlobCreateResponse) Status() string {
-	return pbcr.rawResponse.Status
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbcr PageBlobCreateResponse) ClientRequestID() string {
-	return pbcr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (pbcr PageBlobCreateResponse) ContentMD5() []byte {
-	s := pbcr.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// Date returns the value for header Date.
-func (pbcr PageBlobCreateResponse) Date() time.Time {
-	s := pbcr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
-func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string {
-	return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
-}
-
-// EncryptionScope returns the value for header x-ms-encryption-scope.
-func (pbcr PageBlobCreateResponse) EncryptionScope() string {
-	return pbcr.rawResponse.Header.Get("x-ms-encryption-scope")
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbcr PageBlobCreateResponse) ErrorCode() string {
-	return pbcr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbcr PageBlobCreateResponse) ETag() ETag {
-	return ETag(pbcr.rawResponse.Header.Get("ETag"))
-}
-
-// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
-func (pbcr PageBlobCreateResponse) IsServerEncrypted() string {
-	return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted")
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbcr PageBlobCreateResponse) LastModified() time.Time {
-	s := pbcr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbcr PageBlobCreateResponse) RequestID() string {
-	return pbcr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbcr PageBlobCreateResponse) Version() string {
-	return pbcr.rawResponse.Header.Get("x-ms-version")
-}
-
-// VersionID returns the value for header x-ms-version-id.
-func (pbcr PageBlobCreateResponse) VersionID() string {
-	return pbcr.rawResponse.Header.Get("x-ms-version-id")
-}
-
-// PageBlobResizeResponse ...
-type PageBlobResizeResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbrr PageBlobResizeResponse) Response() *http.Response {
-	return pbrr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbrr PageBlobResizeResponse) StatusCode() int {
-	return pbrr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbrr PageBlobResizeResponse) Status() string {
-	return pbrr.rawResponse.Status
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 {
-	s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbrr PageBlobResizeResponse) ClientRequestID() string {
-	return pbrr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (pbrr PageBlobResizeResponse) Date() time.Time {
-	s := pbrr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbrr PageBlobResizeResponse) ErrorCode() string {
-	return pbrr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbrr PageBlobResizeResponse) ETag() ETag {
-	return ETag(pbrr.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbrr PageBlobResizeResponse) LastModified() time.Time {
-	s := pbrr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbrr PageBlobResizeResponse) RequestID() string {
-	return pbrr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbrr PageBlobResizeResponse) Version() string {
-	return pbrr.rawResponse.Header.Get("x-ms-version")
-}
-
-// PageBlobUpdateSequenceNumberResponse ...
-type PageBlobUpdateSequenceNumberResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response {
-	return pbusnr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int {
-	return pbusnr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string {
-	return pbusnr.rawResponse.Status
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 {
-	s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string {
-	return pbusnr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time {
-	s := pbusnr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string {
-	return pbusnr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag {
-	return ETag(pbusnr.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time {
-	s := pbusnr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string {
-	return pbusnr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string {
-	return pbusnr.rawResponse.Header.Get("x-ms-version")
-}
-
-// PageBlobUploadPagesFromURLResponse ...
-type PageBlobUploadPagesFromURLResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbupfur PageBlobUploadPagesFromURLResponse) Response() *http.Response {
-	return pbupfur.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbupfur PageBlobUploadPagesFromURLResponse) StatusCode() int {
-	return pbupfur.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbupfur PageBlobUploadPagesFromURLResponse) Status() string {
-	return pbupfur.rawResponse.Status
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (pbupfur PageBlobUploadPagesFromURLResponse) BlobSequenceNumber() int64 {
-	s := pbupfur.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (pbupfur PageBlobUploadPagesFromURLResponse) ContentMD5() []byte {
-	s := pbupfur.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// Date returns the value for header Date.
-func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time {
-	s := pbupfur.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
-func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
-}
-
-// EncryptionScope returns the value for header x-ms-encryption-scope.
-func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionScope() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-encryption-scope")
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbupfur PageBlobUploadPagesFromURLResponse) ETag() ETag {
-	return ETag(pbupfur.rawResponse.Header.Get("ETag"))
-}
-
-// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
-func (pbupfur PageBlobUploadPagesFromURLResponse) IsServerEncrypted() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbupfur PageBlobUploadPagesFromURLResponse) LastModified() time.Time {
-	s := pbupfur.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbupfur PageBlobUploadPagesFromURLResponse) RequestID() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string {
-	return pbupfur.rawResponse.Header.Get("x-ms-version")
-}
-
-// XMsContentCrc64 returns the value for header x-ms-content-crc64.
-func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte {
-	s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// PageBlobUploadPagesResponse ...
-type PageBlobUploadPagesResponse struct {
-	rawResponse *http.Response
-}
-
-// Response returns the raw HTTP response object.
-func (pbupr PageBlobUploadPagesResponse) Response() *http.Response {
-	return pbupr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pbupr PageBlobUploadPagesResponse) StatusCode() int {
-	return pbupr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pbupr PageBlobUploadPagesResponse) Status() string {
-	return pbupr.rawResponse.Status
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 {
-	s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string {
-	return pbupr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte {
-	s := pbupr.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// Date returns the value for header Date.
-func (pbupr PageBlobUploadPagesResponse) Date() time.Time {
-	s := pbupr.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
-func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string {
-	return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
-}
-
-// EncryptionScope returns the value for header x-ms-encryption-scope.
-func (pbupr PageBlobUploadPagesResponse) EncryptionScope() string {
-	return pbupr.rawResponse.Header.Get("x-ms-encryption-scope")
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pbupr PageBlobUploadPagesResponse) ErrorCode() string {
-	return pbupr.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pbupr PageBlobUploadPagesResponse) ETag() ETag {
-	return ETag(pbupr.rawResponse.Header.Get("ETag"))
-}
-
-// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
-func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string {
-	return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted")
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time {
-	s := pbupr.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pbupr PageBlobUploadPagesResponse) RequestID() string {
-	return pbupr.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pbupr PageBlobUploadPagesResponse) Version() string {
-	return pbupr.rawResponse.Header.Get("x-ms-version")
-}
-
-// XMsContentCrc64 returns the value for header x-ms-content-crc64.
-func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte {
-	s := pbupr.rawResponse.Header.Get("x-ms-content-crc64")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// PageList - the list of pages
-type PageList struct {
-	rawResponse *http.Response
-	PageRange []PageRange `xml:"PageRange"`
-	ClearRange []ClearRange `xml:"ClearRange"`
-	NextMarker Marker `xml:"NextMarker"`
-}
-
-// Response returns the raw HTTP response object.
-func (pl PageList) Response() *http.Response {
-	return pl.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (pl PageList) StatusCode() int {
-	return pl.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (pl PageList) Status() string {
-	return pl.rawResponse.Status
-}
-
-// BlobContentLength returns the value for header x-ms-blob-content-length.
-func (pl PageList) BlobContentLength() int64 {
-	s := pl.rawResponse.Header.Get("x-ms-blob-content-length")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (pl PageList) ClientRequestID() string {
-	return pl.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// Date returns the value for header Date.
-func (pl PageList) Date() time.Time {
-	s := pl.rawResponse.Header.Get("Date")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// ErrorCode returns the value for header x-ms-error-code.
-func (pl PageList) ErrorCode() string {
-	return pl.rawResponse.Header.Get("x-ms-error-code")
-}
-
-// ETag returns the value for header ETag.
-func (pl PageList) ETag() ETag {
-	return ETag(pl.rawResponse.Header.Get("ETag"))
-}
-
-// LastModified returns the value for header Last-Modified.
-func (pl PageList) LastModified() time.Time {
-	s := pl.rawResponse.Header.Get("Last-Modified")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// RequestID returns the value for header x-ms-request-id.
-func (pl PageList) RequestID() string {
-	return pl.rawResponse.Header.Get("x-ms-request-id")
-}
-
-// Version returns the value for header x-ms-version.
-func (pl PageList) Version() string {
-	return pl.rawResponse.Header.Get("x-ms-version")
-}
-
-// PageRange ...
-type PageRange struct {
-	Start int64 `xml:"Start"`
-	End int64 `xml:"End"`
-}
-
-// QueryFormat ...
-type QueryFormat struct {
-	// Type - Possible values include: 'QueryFormatDelimited', 'QueryFormatJSON', 'QueryFormatArrow', 'QueryFormatParquet', 'QueryFormatNone'
-	Type QueryFormatType `xml:"Type"`
-	DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"`
-	JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"`
-	ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"`
-	ParquetTextConfiguration map[string]interface{} `xml:"ParquetTextConfiguration"`
-}
-
-// QueryRequest - Groups the set of query request settings.
-type QueryRequest struct {
-	// QueryType - Required. The type of the provided query expression.
-	QueryType string `xml:"QueryType"`
-	// Expression - The query expression in SQL. The maximum size of the query expression is 256KiB.
-	Expression string `xml:"Expression"`
-	InputSerialization *QuerySerialization `xml:"InputSerialization"`
-	OutputSerialization *QuerySerialization `xml:"OutputSerialization"`
-}
-
-// QueryResponse - Wraps the response from the blobClient.Query method.
-type QueryResponse struct {
-	rawResponse *http.Response
-}
-
-// NewMetadata returns user-defined key/value pairs.
-func (qr QueryResponse) NewMetadata() Metadata {
-	md := Metadata{}
-	for k, v := range qr.rawResponse.Header {
-		if len(k) > mdPrefixLen {
-			if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) {
-				md[strings.ToLower(k[mdPrefixLen:])] = v[0]
-			}
-		}
-	}
-	return md
-}
-
-// Response returns the raw HTTP response object.
-func (qr QueryResponse) Response() *http.Response {
-	return qr.rawResponse
-}
-
-// StatusCode returns the HTTP status code of the response, e.g. 200.
-func (qr QueryResponse) StatusCode() int {
-	return qr.rawResponse.StatusCode
-}
-
-// Status returns the HTTP status message of the response, e.g. "200 OK".
-func (qr QueryResponse) Status() string {
-	return qr.rawResponse.Status
-}
-
-// Body returns the raw HTTP response object's Body.
-func (qr QueryResponse) Body() io.ReadCloser {
-	return qr.rawResponse.Body
-}
-
-// AcceptRanges returns the value for header Accept-Ranges.
-func (qr QueryResponse) AcceptRanges() string {
-	return qr.rawResponse.Header.Get("Accept-Ranges")
-}
-
-// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
-func (qr QueryResponse) BlobCommittedBlockCount() int32 {
-	s := qr.rawResponse.Header.Get("x-ms-blob-committed-block-count")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 32)
-	if err != nil {
-		i = 0
-	}
-	return int32(i)
-}
-
-// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
-func (qr QueryResponse) BlobContentMD5() []byte {
-	s := qr.rawResponse.Header.Get("x-ms-blob-content-md5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
-func (qr QueryResponse) BlobSequenceNumber() int64 {
-	s := qr.rawResponse.Header.Get("x-ms-blob-sequence-number")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// BlobType returns the value for header x-ms-blob-type.
-func (qr QueryResponse) BlobType() BlobType {
-	return BlobType(qr.rawResponse.Header.Get("x-ms-blob-type"))
-}
-
-// CacheControl returns the value for header Cache-Control.
-func (qr QueryResponse) CacheControl() string {
-	return qr.rawResponse.Header.Get("Cache-Control")
-}
-
-// ClientRequestID returns the value for header x-ms-client-request-id.
-func (qr QueryResponse) ClientRequestID() string {
-	return qr.rawResponse.Header.Get("x-ms-client-request-id")
-}
-
-// ContentCrc64 returns the value for header x-ms-content-crc64.
-func (qr QueryResponse) ContentCrc64() []byte {
-	s := qr.rawResponse.Header.Get("x-ms-content-crc64")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// ContentDisposition returns the value for header Content-Disposition.
-func (qr QueryResponse) ContentDisposition() string {
-	return qr.rawResponse.Header.Get("Content-Disposition")
-}
-
-// ContentEncoding returns the value for header Content-Encoding.
-func (qr QueryResponse) ContentEncoding() string {
-	return qr.rawResponse.Header.Get("Content-Encoding")
-}
-
-// ContentLanguage returns the value for header Content-Language.
-func (qr QueryResponse) ContentLanguage() string {
-	return qr.rawResponse.Header.Get("Content-Language")
-}
-
-// ContentLength returns the value for header Content-Length.
-func (qr QueryResponse) ContentLength() int64 {
-	s := qr.rawResponse.Header.Get("Content-Length")
-	if s == "" {
-		return -1
-	}
-	i, err := strconv.ParseInt(s, 10, 64)
-	if err != nil {
-		i = 0
-	}
-	return i
-}
-
-// ContentMD5 returns the value for header Content-MD5.
-func (qr QueryResponse) ContentMD5() []byte {
-	s := qr.rawResponse.Header.Get("Content-MD5")
-	if s == "" {
-		return nil
-	}
-	b, err := base64.StdEncoding.DecodeString(s)
-	if err != nil {
-		b = nil
-	}
-	return b
-}
-
-// ContentRange returns the value for header Content-Range.
-func (qr QueryResponse) ContentRange() string {
-	return qr.rawResponse.Header.Get("Content-Range")
-}
-
-// ContentType returns the value for header Content-Type.
-func (qr QueryResponse) ContentType() string {
-	return qr.rawResponse.Header.Get("Content-Type")
-}
-
-// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
-func (qr QueryResponse) CopyCompletionTime() time.Time {
-	s := qr.rawResponse.Header.Get("x-ms-copy-completion-time")
-	if s == "" {
-		return time.Time{}
-	}
-	t, err := time.Parse(time.RFC1123, s)
-	if err != nil {
-		t = time.Time{}
-	}
-	return t
-}
-
-// CopyID returns the value for header x-ms-copy-id.
-func (qr QueryResponse) CopyID() string { - return qr.rawResponse.Header.Get("x-ms-copy-id") -} - -// CopyProgress returns the value for header x-ms-copy-progress. -func (qr QueryResponse) CopyProgress() string { - return qr.rawResponse.Header.Get("x-ms-copy-progress") -} - -// CopySource returns the value for header x-ms-copy-source. -func (qr QueryResponse) CopySource() string { - return qr.rawResponse.Header.Get("x-ms-copy-source") -} - -// CopyStatus returns the value for header x-ms-copy-status. -func (qr QueryResponse) CopyStatus() CopyStatusType { - return CopyStatusType(qr.rawResponse.Header.Get("x-ms-copy-status")) -} - -// CopyStatusDescription returns the value for header x-ms-copy-status-description. -func (qr QueryResponse) CopyStatusDescription() string { - return qr.rawResponse.Header.Get("x-ms-copy-status-description") -} - -// Date returns the value for header Date. -func (qr QueryResponse) Date() time.Time { - s := qr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. -func (qr QueryResponse) EncryptionKeySha256() string { - return qr.rawResponse.Header.Get("x-ms-encryption-key-sha256") -} - -// EncryptionScope returns the value for header x-ms-encryption-scope. -func (qr QueryResponse) EncryptionScope() string { - return qr.rawResponse.Header.Get("x-ms-encryption-scope") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qr QueryResponse) ErrorCode() string { - return qr.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (qr QueryResponse) ETag() ETag { - return ETag(qr.rawResponse.Header.Get("ETag")) -} - -// IsServerEncrypted returns the value for header x-ms-server-encrypted. -func (qr QueryResponse) IsServerEncrypted() string { - return qr.rawResponse.Header.Get("x-ms-server-encrypted") -} - -// LastModified returns the value for header Last-Modified. -func (qr QueryResponse) LastModified() time.Time { - s := qr.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// LeaseDuration returns the value for header x-ms-lease-duration. -func (qr QueryResponse) LeaseDuration() LeaseDurationType { - return LeaseDurationType(qr.rawResponse.Header.Get("x-ms-lease-duration")) -} - -// LeaseState returns the value for header x-ms-lease-state. -func (qr QueryResponse) LeaseState() LeaseStateType { - return LeaseStateType(qr.rawResponse.Header.Get("x-ms-lease-state")) -} - -// LeaseStatus returns the value for header x-ms-lease-status. -func (qr QueryResponse) LeaseStatus() LeaseStatusType { - return LeaseStatusType(qr.rawResponse.Header.Get("x-ms-lease-status")) -} - -// RequestID returns the value for header x-ms-request-id. -func (qr QueryResponse) RequestID() string { - return qr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qr QueryResponse) Version() string { - return qr.rawResponse.Header.Get("x-ms-version") -} - -// QuerySerialization ... 
-type QuerySerialization struct { - Format QueryFormat `xml:"Format"` -} - -// RetentionPolicy - the retention policy which determines how long the associated data should persist -type RetentionPolicy struct { - // Enabled - Indicates whether a retention policy is enabled for the storage service - Enabled bool `xml:"Enabled"` - // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted - Days *int32 `xml:"Days"` - // AllowPermanentDelete - Indicates whether permanent delete is allowed on this storage account. - AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` -} - -// ServiceGetAccountInfoResponse ... -type ServiceGetAccountInfoResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (sgair ServiceGetAccountInfoResponse) Response() *http.Response { - return sgair.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sgair ServiceGetAccountInfoResponse) StatusCode() int { - return sgair.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (sgair ServiceGetAccountInfoResponse) Status() string { - return sgair.rawResponse.Status -} - -// AccountKind returns the value for header x-ms-account-kind. -func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { - return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string { - return sgair.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (sgair ServiceGetAccountInfoResponse) Date() time.Time { - s := sgair.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sgair ServiceGetAccountInfoResponse) ErrorCode() string { - return sgair.rawResponse.Header.Get("x-ms-error-code") -} - -// IsHierarchicalNamespaceEnabled returns the value for header x-ms-is-hns-enabled. -func (sgair ServiceGetAccountInfoResponse) IsHierarchicalNamespaceEnabled() string { - return sgair.rawResponse.Header.Get("x-ms-is-hns-enabled") -} - -// RequestID returns the value for header x-ms-request-id. -func (sgair ServiceGetAccountInfoResponse) RequestID() string { - return sgair.rawResponse.Header.Get("x-ms-request-id") -} - -// SkuName returns the value for header x-ms-sku-name. -func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType { - return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name")) -} - -// Version returns the value for header x-ms-version. -func (sgair ServiceGetAccountInfoResponse) Version() string { - return sgair.rawResponse.Header.Get("x-ms-version") -} - -// ServiceSetPropertiesResponse ... -type ServiceSetPropertiesResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (sspr ServiceSetPropertiesResponse) Response() *http.Response { - return sspr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sspr ServiceSetPropertiesResponse) StatusCode() int { - return sspr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". 
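
Model types such as RetentionPolicy above are plain encoding/xml structs, so they round-trip with the standard library, and nil pointer fields are simply omitted from the output. A small sketch under those assumptions (function name and values are illustrative; fmt and encoding/xml imported):

    func sampleRetentionPolicyXML() {
        days := int32(7)
        // AllowPermanentDelete stays nil and is omitted from the XML.
        policy := RetentionPolicy{Enabled: true, Days: &days}
        if out, err := xml.Marshal(policy); err == nil {
            // <RetentionPolicy><Enabled>true</Enabled><Days>7</Days></RetentionPolicy>
            fmt.Println(string(out))
        }
    }
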
-func (sspr ServiceSetPropertiesResponse) Status() string { - return sspr.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (sspr ServiceSetPropertiesResponse) ClientRequestID() string { - return sspr.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sspr ServiceSetPropertiesResponse) ErrorCode() string { - return sspr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (sspr ServiceSetPropertiesResponse) RequestID() string { - return sspr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (sspr ServiceSetPropertiesResponse) Version() string { - return sspr.rawResponse.Header.Get("x-ms-version") -} - -// SignedIdentifier - signed identifier -type SignedIdentifier struct { - // ID - a unique id - ID string `xml:"Id"` - AccessPolicy AccessPolicy `xml:"AccessPolicy"` -} - -// SignedIdentifiers - Wraps the response from the containerClient.GetAccessPolicy method. -type SignedIdentifiers struct { - rawResponse *http.Response - Items []SignedIdentifier `xml:"SignedIdentifier"` -} - -// Response returns the raw HTTP response object. -func (si SignedIdentifiers) Response() *http.Response { - return si.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (si SignedIdentifiers) StatusCode() int { - return si.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (si SignedIdentifiers) Status() string { - return si.rawResponse.Status -} - -// BlobPublicAccess returns the value for header x-ms-blob-public-access. -func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { - return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (si SignedIdentifiers) ClientRequestID() string { - return si.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (si SignedIdentifiers) Date() time.Time { - s := si.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (si SignedIdentifiers) ErrorCode() string { - return si.rawResponse.Header.Get("x-ms-error-code") -} - -// ETag returns the value for header ETag. -func (si SignedIdentifiers) ETag() ETag { - return ETag(si.rawResponse.Header.Get("ETag")) -} - -// LastModified returns the value for header Last-Modified. -func (si SignedIdentifiers) LastModified() time.Time { - s := si.rawResponse.Header.Get("Last-Modified") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// RequestID returns the value for header x-ms-request-id. -func (si SignedIdentifiers) RequestID() string { - return si.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
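
SignedIdentifier above pairs a caller-chosen policy ID with an AccessPolicy, and a container's access policy travels as a list of them. A sketch of building one; this assumes the exported AccessPolicy mirrors the internal accessPolicy defined later in this file (pointer Start, Expiry and Permission fields), which the init() size checks also imply:

    func sampleSignedIdentifier() SignedIdentifier {
        perm := "r" // stored-access-policy permission string, assumed read-only here
        start := time.Now().UTC()
        expiry := start.Add(24 * time.Hour)
        return SignedIdentifier{
            ID:           "read-only-policy",
            AccessPolicy: AccessPolicy{Start: &start, Expiry: &expiry, Permission: &perm},
        }
    }
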
-func (si SignedIdentifiers) Version() string { - return si.rawResponse.Header.Get("x-ms-version") -} - -// StaticWebsite - The properties that enable an account to host a static website -type StaticWebsite struct { - // Enabled - Indicates whether this account is hosting a static website - Enabled bool `xml:"Enabled"` - // IndexDocument - The default name of the index page under each directory - IndexDocument *string `xml:"IndexDocument"` - // ErrorDocument404Path - The absolute path of the custom 404 page - ErrorDocument404Path *string `xml:"ErrorDocument404Path"` - // DefaultIndexDocumentPath - Absolute path of the default index page - DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` -} - -// // StorageError ... -// type StorageError struct { -// Message *string `xml:"Message"` -// } - -// StorageServiceProperties - Storage Service Properties. -type StorageServiceProperties struct { - rawResponse *http.Response - Logging *Logging `xml:"Logging"` - HourMetrics *Metrics `xml:"HourMetrics"` - MinuteMetrics *Metrics `xml:"MinuteMetrics"` - // Cors - The set of CORS rules. - Cors []CorsRule `xml:"Cors>CorsRule"` - // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions - DefaultServiceVersion *string `xml:"DefaultServiceVersion"` - DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` - StaticWebsite *StaticWebsite `xml:"StaticWebsite"` -} - -// Response returns the raw HTTP response object. -func (ssp StorageServiceProperties) Response() *http.Response { - return ssp.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (ssp StorageServiceProperties) StatusCode() int { - return ssp.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (ssp StorageServiceProperties) Status() string { - return ssp.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (ssp StorageServiceProperties) ClientRequestID() string { - return ssp.rawResponse.Header.Get("x-ms-client-request-id") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (ssp StorageServiceProperties) ErrorCode() string { - return ssp.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (ssp StorageServiceProperties) RequestID() string { - return ssp.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (ssp StorageServiceProperties) Version() string { - return ssp.rawResponse.Header.Get("x-ms-version") -} - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats struct { - rawResponse *http.Response - GeoReplication *GeoReplication `xml:"GeoReplication"` -} - -// Response returns the raw HTTP response object. -func (sss StorageServiceStats) Response() *http.Response { - return sss.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sss StorageServiceStats) StatusCode() int { - return sss.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (sss StorageServiceStats) Status() string { - return sss.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. 
-func (sss StorageServiceStats) ClientRequestID() string { - return sss.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (sss StorageServiceStats) Date() time.Time { - s := sss.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sss StorageServiceStats) ErrorCode() string { - return sss.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (sss StorageServiceStats) RequestID() string { - return sss.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (sss StorageServiceStats) Version() string { - return sss.rawResponse.Header.Get("x-ms-version") -} - -// SubmitBatchResponse - Wraps the response from the containerClient.SubmitBatch method. -type SubmitBatchResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (sbr SubmitBatchResponse) Response() *http.Response { - return sbr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sbr SubmitBatchResponse) StatusCode() int { - return sbr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (sbr SubmitBatchResponse) Status() string { - return sbr.rawResponse.Status -} - -// Body returns the raw HTTP response object's Body. -func (sbr SubmitBatchResponse) Body() io.ReadCloser { - return sbr.rawResponse.Body -} - -// ContentType returns the value for header Content-Type. -func (sbr SubmitBatchResponse) ContentType() string { - return sbr.rawResponse.Header.Get("Content-Type") -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sbr SubmitBatchResponse) ErrorCode() string { - return sbr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (sbr SubmitBatchResponse) RequestID() string { - return sbr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (sbr SubmitBatchResponse) Version() string { - return sbr.rawResponse.Header.Get("x-ms-version") -} - -// UserDelegationKey - A user delegation key -type UserDelegationKey struct { - rawResponse *http.Response - // SignedOid - The Azure Active Directory object ID in GUID format. - SignedOid string `xml:"SignedOid"` - // SignedTid - The Azure Active Directory tenant ID in GUID format - SignedTid string `xml:"SignedTid"` - // SignedStart - The date-time the key is active - SignedStart time.Time `xml:"SignedStart"` - // SignedExpiry - The date-time the key expires - SignedExpiry time.Time `xml:"SignedExpiry"` - // SignedService - Abbreviation of the Azure Storage service that accepts the key - SignedService string `xml:"SignedService"` - // SignedVersion - The service version that created the key - SignedVersion string `xml:"SignedVersion"` - // Value - The key as a base64 string - Value string `xml:"Value"` -} - -// MarshalXML implements the xml.Marshaler interface for UserDelegationKey. -func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - udk2 := (*userDelegationKey)(unsafe.Pointer(&udk)) - return e.EncodeElement(*udk2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for UserDelegationKey. 
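
MarshalXML above (and UnmarshalXML just below) uses a generated idiom worth spelling out: the exported struct is reinterpreted through unsafe.Pointer as a layout-identical shadow struct whose time fields carry custom text marshalers, so callers keep plain time.Time while the wire format stays RFC3339; the init() size checks further down guard that layout assumption. Distilled to its essentials, with Outer and outerShadow as illustrative names rather than types from this file:

    type Outer struct{ When time.Time }         // what callers see
    type outerShadow struct{ When timeRFC3339 } // same memory layout, custom marshaler

    func (o Outer) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
        // Valid only while both structs stay field-for-field identical,
        // which is exactly what the init() reflection checks enforce.
        sh := (*outerShadow)(unsafe.Pointer(&o))
        return e.EncodeElement(*sh, start)
    }
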
-func (udk *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - udk2 := (*userDelegationKey)(unsafe.Pointer(udk)) - return d.DecodeElement(udk2, &start) -} - -// Response returns the raw HTTP response object. -func (udk UserDelegationKey) Response() *http.Response { - return udk.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (udk UserDelegationKey) StatusCode() int { - return udk.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (udk UserDelegationKey) Status() string { - return udk.rawResponse.Status -} - -// ClientRequestID returns the value for header x-ms-client-request-id. -func (udk UserDelegationKey) ClientRequestID() string { - return udk.rawResponse.Header.Get("x-ms-client-request-id") -} - -// Date returns the value for header Date. -func (udk UserDelegationKey) Date() time.Time { - s := udk.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (udk UserDelegationKey) ErrorCode() string { - return udk.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (udk UserDelegationKey) RequestID() string { - return udk.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (udk UserDelegationKey) Version() string { - return udk.rawResponse.Header.Get("x-ms-version") -} - -func init() { - if reflect.TypeOf((*UserDelegationKey)(nil)).Elem().Size() != reflect.TypeOf((*userDelegationKey)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between UserDelegationKey and userDelegationKey")) - } - if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) - } - if reflect.TypeOf((*BlobPropertiesInternal)(nil)).Elem().Size() != reflect.TypeOf((*blobPropertiesInternal)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between BlobPropertiesInternal and blobPropertiesInternal")) - } - if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between ContainerProperties and containerProperties")) - } - if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between GeoReplication and geoReplication")) - } -} - -const ( - rfc3339Format = "2006-01-02T15:04:05Z" -) - -// used to convert times from UTC to GMT before sending across the wire -var gmt = time.FixedZone("GMT", 0) - -// internal type used for marshalling time in RFC1123 format -type timeRFC1123 struct { - time.Time -} - -// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. -func (t timeRFC1123) MarshalText() ([]byte, error) { - return []byte(t.Format(time.RFC1123)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. 
-func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = time.Parse(time.RFC1123, string(data)) - return -} - -// internal type used for marshalling time in RFC3339 format -type timeRFC3339 struct { - time.Time -} - -// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. -func (t timeRFC3339) MarshalText() ([]byte, error) { - return []byte(t.Format(rfc3339Format)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - t.Time, err = time.Parse(rfc3339Format, string(data)) - return -} - -// internal type used for marshalling base64 encoded strings -type base64Encoded struct { - b []byte -} - -// MarshalText implements the encoding.TextMarshaler interface for base64Encoded. -func (c base64Encoded) MarshalText() ([]byte, error) { - return []byte(base64.StdEncoding.EncodeToString(c.b)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded. -func (c *base64Encoded) UnmarshalText(data []byte) error { - b, err := base64.StdEncoding.DecodeString(string(data)) - if err != nil { - return err - } - c.b = b - return nil -} - -// internal type used for marshalling -type userDelegationKey struct { - rawResponse *http.Response - SignedOid string `xml:"SignedOid"` - SignedTid string `xml:"SignedTid"` - SignedStart timeRFC3339 `xml:"SignedStart"` - SignedExpiry timeRFC3339 `xml:"SignedExpiry"` - SignedService string `xml:"SignedService"` - SignedVersion string `xml:"SignedVersion"` - Value string `xml:"Value"` -} - -// internal type used for marshalling -type accessPolicy struct { - Start *timeRFC3339 `xml:"Start"` - Expiry *timeRFC3339 `xml:"Expiry"` - Permission *string `xml:"Permission"` -} - -// internal type used for marshalling -type blobPropertiesInternal struct { - // XMLName is used for marshalling and is subject to removal in a future release. 
- XMLName xml.Name `xml:"Properties"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - LastModified timeRFC1123 `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - ContentLength *int64 `xml:"Content-Length"` - ContentType *string `xml:"Content-Type"` - ContentEncoding *string `xml:"Content-Encoding"` - ContentLanguage *string `xml:"Content-Language"` - ContentMD5 base64Encoded `xml:"Content-MD5"` - ContentDisposition *string `xml:"Content-Disposition"` - CacheControl *string `xml:"Cache-Control"` - BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` - BlobType BlobType `xml:"BlobType"` - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - LeaseState LeaseStateType `xml:"LeaseState"` - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - CopyID *string `xml:"CopyId"` - CopyStatus CopyStatusType `xml:"CopyStatus"` - CopySource *string `xml:"CopySource"` - CopyProgress *string `xml:"CopyProgress"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CopyStatusDescription *string `xml:"CopyStatusDescription"` - ServerEncrypted *bool `xml:"ServerEncrypted"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - DestinationSnapshot *string `xml:"DestinationSnapshot"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - AccessTier AccessTierType `xml:"AccessTier"` - AccessTierInferred *bool `xml:"AccessTierInferred"` - ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` - CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` - EncryptionScope *string `xml:"EncryptionScope"` - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - TagCount *int32 `xml:"TagCount"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - IsSealed *bool `xml:"Sealed"` - RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"` - LegalHold *bool `xml:"LegalHold"` - Owner *string `xml:"Owner"` - Group *string `xml:"Group"` - Permissions *string `xml:"Permissions"` - ACL *string `xml:"Acl"` -} - -// internal type used for marshalling -type containerProperties struct { - LastModified timeRFC1123 `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - LeaseState LeaseStateType `xml:"LeaseState"` - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - PublicAccess PublicAccessType `xml:"PublicAccess"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` - DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` - PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` -} - -// internal type used for marshalling -type geoReplication struct { - Status GeoReplicationStatusType `xml:"Status"` - LastSyncTime timeRFC1123 `xml:"LastSyncTime"` -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go deleted file mode 100644 index 01a81fdfb1c..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go +++ /dev/null @@ -1,1065 +0,0 @@ -package azblob - 
-// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "context" - "encoding/base64" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "time" -) - -// pageBlobClient is the client for the PageBlob methods of the Azblob service. -type pageBlobClient struct { - managementClient -} - -// newPageBlobClient creates an instance of the pageBlobClient client. -func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { - return pageBlobClient{newManagementClient(url, p)} -} - -// ClearPages the Clear Pages operation clears a set of pages from a page blob -// -// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting -// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified -// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not -// specified, encryption is performed with the root account encryption key. For more information, see Encryption at -// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be -// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the -// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key -// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption -// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default -// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. -// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number -// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob -// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate -// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only -// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. 
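
The doc comment above maps one-to-one onto the positional parameters of the signature that follows. In practice this internal client is reached through the public page-blob wrapper, which leaves most of them unset; a hedged sketch of a direct call that clears the first 512-byte page (clearFirstPage is an illustrative name, with ctx and client supplied by the caller):

    func clearFirstPage(ctx context.Context, client pageBlobClient) error {
        rangeHdr := "bytes=0-511" // x-ms-range must cover whole 512-byte pages
        _, err := client.ClearPages(ctx,
            0,                       // contentLength: Clear Pages sends no body
            nil,                     // timeout
            &rangeHdr,               // rangeParameter
            nil, nil, nil,           // leaseID, encryptionKey, encryptionKeySha256
            EncryptionAlgorithmNone, // no customer-provided key
            nil,                     // encryptionScope
            nil, nil, nil,           // sequence-number preconditions (LE, LT, EQ)
            nil, nil,                // ifModifiedSince, ifUnmodifiedSince
            nil, nil,                // ifMatch, ifNoneMatch
            nil, nil)                // ifTags, requestID
        return err
    }
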
-func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobClearPagesResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobClearPagesResponse), err -} - -// clearPagesPreparer prepares the ClearPages request. -func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "page") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if rangeParameter != nil { - req.Header.Set("x-ms-range", *rangeParameter) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifSequenceNumberLessThanOrEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) - } - if ifSequenceNumberLessThan != nil { - req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) - } - if ifSequenceNumberEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", 
(*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-page-write", "clear") - return req, nil -} - -// clearPagesResponder handles the response to the ClearPages request. -func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err -} - -// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. -// The snapshot is copied such that only the differential changes between the previously copied snapshot are -// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or -// copied from as usual. This API is supported since REST version 2016-05-31. -// -// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that -// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob -// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is -// expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only -// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate -// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobCopyIncrementalResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobCopyIncrementalResponse), err -} - -// copyIncrementalPreparer prepares the CopyIncremental request. 
-func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "incrementalcopy") - req.URL.RawQuery = params.Encode() - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-copy-source", copySource) - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// copyIncrementalResponder handles the response to the CopyIncremental request. -func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err -} - -// Create the Create operation creates a new page blob. -// -// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page -// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is -// expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. tier is optional. Indicates the tier to be set on the page blob. -// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and -// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this -// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's -// content language. If specified, this property is stored with the blob and returned with a read request. -// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for -// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache -// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional. -// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the -// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value -// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from -// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules -// for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if -// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition -// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to -// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account -// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the -// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. -// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is -// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version -// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the -// request. If not specified, encryption is performed with the default account encryption scope. For more information, -// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can -// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. -// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. -// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a -// legal hold should be set on the blob. 
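
As with ClearPages, the long doc comment above enumerates Create's positional parameters in order. A sketch of the minimal direct call, creating an 8 KiB page blob with every optional unset (createEmptyPageBlob is an illustrative name):

    func createEmptyPageBlob(ctx context.Context, client pageBlobClient) error {
        _, err := client.Create(ctx,
            0,    // contentLength: Create sends no body
            8192, // blobContentLength: must be 512-byte aligned
            nil,  // timeout
            PremiumPageBlobAccessTierNone,          // tier
            nil, nil, nil, nil, nil,                // content type/encoding/language/MD5/cache-control
            nil,                                    // metadata
            nil, nil,                               // leaseID, blobContentDisposition
            nil, nil, EncryptionAlgorithmNone, nil, // customer-provided key, algorithm, scope
            nil, nil, nil, nil, nil,                // if-modified/unmodified/match/none-match/tags
            nil,                                    // blobSequenceNumber
            nil, nil,                               // requestID, blobTagsString
            nil, BlobImmutabilityPolicyModeNone, nil) // immutability expiry and mode, legalHold
        return err
    }
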
-func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*PageBlobCreateResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobCreateResponse), err -} - -// createPreparer prepares the Create request. 
-func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if tier != PremiumPageBlobAccessTierNone { - req.Header.Set("x-ms-access-tier", string(tier)) - } - if blobContentType != nil { - req.Header.Set("x-ms-blob-content-type", *blobContentType) - } - if blobContentEncoding != nil { - req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) - } - if blobContentLanguage != nil { - req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) - } - if blobContentMD5 != nil { - req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) - } - if blobCacheControl != nil { - req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) - } - if metadata != nil { - for k, v := range metadata { - req.Header.Set("x-ms-meta-"+k, v) - } - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if blobContentDisposition != nil { - req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - if blobSequenceNumber != nil { - req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if blobTagsString != nil { - req.Header.Set("x-ms-tags", *blobTagsString) - } - if immutabilityPolicyExpiry != nil { - req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) 
- } - if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { - req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) - } - if legalHold != nil { - req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) - } - req.Header.Set("x-ms-blob-type", "PageBlob") - return req, nil -} - -// createResponder handles the response to the Create request. -func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobCreateResponse{rawResponse: resp.Response()}, err -} - -// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a -// page blob -// -// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to -// retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified -// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL -// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, -// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is -// enabled. -func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageList), err -} - -// getPageRangesPreparer prepares the GetPageRanges request. 
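
Unlike the write operations above, whose responders drain and discard the body, the GetPageRanges responder below decodes an XML payload into the PageList wrapper whose typed header accessors appear earlier in this patch. A sketch of a direct call with all optionals unset (listValidRanges is an illustrative name):

    func listValidRanges(ctx context.Context, client pageBlobClient) (*PageList, error) {
        rangeHdr := "bytes=0-4095"
        return client.GetPageRanges(ctx,
            nil,       // snapshot
            nil,       // timeout
            &rangeHdr, // rangeParameter
            nil,       // leaseID
            nil, nil,  // ifModifiedSince, ifUnmodifiedSince
            nil, nil,  // ifMatch, ifNoneMatch
            nil, nil)  // ifTags, requestID
    }
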
-func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "pagelist") - req.URL.RawQuery = params.Encode() - if rangeParameter != nil { - req.Header.Set("x-ms-range", *rangeParameter) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getPageRangesResponder handles the response to the GetPageRanges request. -func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &PageList{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were -// changed between target blob and previous snapshot. -// -// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to -// retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot -// parameter is a DateTime value that specifies that the response will contain only pages that were changed between -// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a -// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots -// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header -// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the -// target blob. The response will only contain pages that were changed between the target blob and its previous -// snapshot. 
rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the -// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this -// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is -// specify this header value to operate only on a blob if it has not been modified since the specified date/time. -// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag -// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to -// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageList), err -} - -// getPageRangesDiffPreparer prepares the GetPageRangesDiff request. 
-func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if snapshot != nil && len(*snapshot) > 0 { - params.Set("snapshot", *snapshot) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - if prevsnapshot != nil && len(*prevsnapshot) > 0 { - params.Set("prevsnapshot", *prevsnapshot) - } - params.Set("comp", "pagelist") - req.URL.RawQuery = params.Encode() - if prevSnapshotURL != nil { - req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL) - } - if rangeParameter != nil { - req.Header.Set("x-ms-range", *rangeParameter) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request. -func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &PageList{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// Resize resize the Blob -// -// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must -// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information, -// see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the -// data provided in the request. If not specified, encryption is performed with the root account encryption key. For -// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the -// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the -// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided -// if the x-ms-encryption-key header is provided. encryptionScope is optional. 
Version 2019-07-07 and later. Specifies -// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is -// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage -// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the -// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been -// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching -// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a -// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobResizeResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobResizeResponse), err -} - -// resizePreparer prepares the Resize request. 
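A correspondingly minimal sketch of the Resize call above, again illustrative rather than from the patch; the 512-byte alignment requirement comes from the doc comment, and all conditional arguments are left nil.

// resizeToPages is an invented helper that sizes the blob in whole 512-byte
// pages, since page blob sizes must be 512-byte aligned.
func resizeToPages(ctx context.Context, client pageBlobClient, pages int64) (*PageBlobResizeResponse, error) {
	return client.Resize(ctx, pages*512, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil, nil)
}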
-func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// resizeResponder handles the response to the Resize request. -func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobResizeResponse{rawResponse: resp.Response()}, err -} - -// UpdateSequenceNumber update the sequence number of the blob -// -// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property -// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout -// is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. blobSequenceNumber -// is set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. 
The -// value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value -// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobUpdateSequenceNumberResponse), err -} - -// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. -func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) - if blobSequenceNumber != nil { - req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. -func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err -} - -// UploadPages the Upload Pages operation writes a range of pages to a page blob -// -// body is initial data body will be closed upon successful return. 
Callers should ensure closure when receiving an -// error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the -// body, to be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to -// be validated by the service. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified -// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not -// specified, encryption is performed with the root account encryption key. For more information, see Encryption at -// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be -// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the -// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key -// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption -// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default -// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. -// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number -// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob -// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate -// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only -// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value -// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching -// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the -// analytics logs when storage analytics logging is enabled. 
-func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobUploadPagesResponse), err -} - -// uploadPagesPreparer prepares the UploadPages request. -func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "page") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - if transactionalContentMD5 != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) - } - if transactionalContentCrc64 != nil { - req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) - } - if rangeParameter != nil { - req.Header.Set("x-ms-range", *rangeParameter) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if 
ifSequenceNumberLessThanOrEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) - } - if ifSequenceNumberLessThan != nil { - req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) - } - if ifSequenceNumberEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - req.Header.Set("x-ms-page-write", "update") - return req, nil -} - -// uploadPagesResponder handles the response to the UploadPages request. -func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err -} - -// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read -// from a URL -// -// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The -// length of this range should match the ContentLength header and x-ms-range/Range destination range header. -// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be -// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated -// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated -// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in -// seconds. For more information, see Setting -// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt -// the data provided in the request. If not specified, encryption is performed with the root account encryption key. -// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of -// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is -// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be -// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. -// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, -// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for -// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and -// matches this ID. 
ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has -// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to -// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this -// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this -// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is -// specify this header value to operate only on a blob if it has not been modified since the specified date/time. -// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag -// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to -// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a -// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to -// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag -// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on -// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit -// that is recorded in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only -// Bearer type is supported. Credentials should be a valid OAuth access token to copy source. -func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*PageBlobUploadPagesFromURLResponse, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req) - if err != nil { - return nil, err - } - return resp.(*PageBlobUploadPagesFromURLResponse), err -} - -// 
uploadPagesFromURLPreparer prepares the UploadPagesFromURL request. -func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "page") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-copy-source", sourceURL) - req.Header.Set("x-ms-source-range", sourceRange) - if sourceContentMD5 != nil { - req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) - } - if sourceContentcrc64 != nil { - req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) - } - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Header.Set("x-ms-range", rangeParameter) - if encryptionKey != nil { - req.Header.Set("x-ms-encryption-key", *encryptionKey) - } - if encryptionKeySha256 != nil { - req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) - } - if encryptionAlgorithm != EncryptionAlgorithmNone { - req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) - } - if encryptionScope != nil { - req.Header.Set("x-ms-encryption-scope", *encryptionScope) - } - if leaseID != nil { - req.Header.Set("x-ms-lease-id", *leaseID) - } - if ifSequenceNumberLessThanOrEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) - } - if ifSequenceNumberLessThan != nil { - req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) - } - if ifSequenceNumberEqualTo != nil { - req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) - } - if ifModifiedSince != nil { - req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifUnmodifiedSince != nil { - req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if ifMatch != nil { - req.Header.Set("If-Match", string(*ifMatch)) - } - if ifNoneMatch != nil { - req.Header.Set("If-None-Match", string(*ifNoneMatch)) - } - if ifTags != nil { - req.Header.Set("x-ms-if-tags", *ifTags) - } - if sourceIfModifiedSince != nil { - req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfUnmodifiedSince != nil { - req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) - } - if sourceIfMatch != nil { - req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) - } - if sourceIfNoneMatch != nil { - req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) - } - 
req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - if copySourceAuthorization != nil { - req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) - } - req.Header.Set("x-ms-page-write", "update") - return req, nil -} - -// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request. -func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusCreated) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go deleted file mode 100644 index 8a023d0a02c..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go +++ /dev/null @@ -1,74 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io/ioutil" -) - -type responder func(resp pipeline.Response) (result pipeline.Response, err error) - -// ResponderPolicyFactory is a Factory capable of creating a responder pipeline. -type responderPolicyFactory struct { - responder responder -} - -// New creates a responder policy factory. -func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return responderPolicy{next: next, responder: arpf.responder} -} - -type responderPolicy struct { - next pipeline.Policy - responder responder -} - -// Do sends the request to the service and validates/deserializes the HTTP response. -func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - resp, err := arp.next.Do(ctx, request) - if err != nil { - return resp, err - } - return arp.responder(resp) -} - -// validateResponse checks an HTTP response's status code against a legal set of codes. -// If the response code is not legal, then validateResponse reads all of the response's body -// (containing error information) and returns a response error. -func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { - if resp == nil { - return NewResponseError(nil, nil, "nil response") - } - responseCode := resp.Response().StatusCode - for _, i := range successStatusCodes { - if i == responseCode { - return nil - } - } - // only close the body in the failure case. in the - // success case responders will close the body as required. 
- defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return err - } - // the service code, description and details will be populated during unmarshalling - responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) - if len(b) > 0 { - if err = xml.Unmarshal(b, &responseError); err != nil { - return NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return responseError -} - -// removes any BOM from the byte slice -func removeBOM(b []byte) []byte { - // UTF8 - return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go deleted file mode 100644 index 3dcc75bb52b..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go +++ /dev/null @@ -1,95 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "fmt" - "github.com/Azure/azure-pipeline-go/pipeline" - "net" - "net/http" -) - -// if you want to provide custom error handling set this variable to your constructor function -var responseErrorFactory func(cause error, response *http.Response, description string) error - -// ResponseError identifies a responder-generated network or response parsing error. -type ResponseError interface { - // Error exposes the Error(), Temporary() and Timeout() methods. - net.Error // Includes the Go error interface - // Response returns the HTTP response. You may examine this but you should not modify it. - Response() *http.Response -} - -// NewResponseError creates an error object that implements the error interface. -func NewResponseError(cause error, response *http.Response, description string) error { - if responseErrorFactory != nil { - return responseErrorFactory(cause, response, description) - } - return &responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - response: response, - description: description, - } -} - -// responseError is the internal struct that implements the public ResponseError interface. -type responseError struct { - pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause - response *http.Response - description string -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *responseError) Error() string { - b := &bytes.Buffer{} - fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) - fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) - s := b.String() - return e.ErrorNode.Error(s) -} - -// Response implements the ResponseError interface's method to return the HTTP response. -func (e *responseError) Response() *http.Response { - return e.response -} - -// RFC7807 PROBLEM ------------------------------------------------------------------------------------ -// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. -/*type RFC7807Problem struct { - // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). - typeURI string // Should default to "about:blank" - // Optional: Short, human-readable summary (maybe localized). 
- title string - // Optional: HTTP status code generated by the origin server - status int - // Optional: Human-readable explanation for this problem occurance. - // Should help client correct the problem. Clients should NOT parse this string. - detail string - // Optional: A (relative) URI identifying this specific problem occurence (it may or may not be dereferenced). - instance string -} -// NewRFC7807Problem ... -func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { - return &RFC7807Problem{ - typeURI: typeURI, - status: status, - title: fmt.Sprintf(titleFormat, a...), - } -} -// Error returns the error information as a string. -func (e *RFC7807Problem) Error() string { - return e.title -} -// TypeURI ... -func (e *RFC7807Problem) TypeURI() string { - if e.typeURI == "" { - e.typeURI = "about:blank" - } - return e.typeURI -} -// Members ... -func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { - return e.status, e.title, e.detail, e.instance -}*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go deleted file mode 100644 index 3072da0e64f..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go +++ /dev/null @@ -1,618 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// serviceClient is the client for the Service methods of the Azblob service. -type serviceClient struct { - managementClient -} - -// newServiceClient creates an instance of the serviceClient client. -func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { - return serviceClient{newManagementClient(url, p)} -} - -// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given -// search expression. Filter blobs searches across all containers within a storage account but can be scoped within -// the expression to a single container. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters -// the results to return only to return only blobs whose tags match the specified expression. marker is a string value -// that identifies the portion of the list of containers to be returned with the next listing operation. The operation -// returns the NextMarker value within the response body if the listing operation did not return all containers -// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter -// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is -// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a -// value greater than 5000, the server will return up to 5000 items. 
Note that if the listing operation crosses a -// partition boundary, then the service will return a continuation token for retrieving the remainder of the results. -// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the -// default of 5000. -func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, - {targetValue: maxresults, - constraints: []constraint{{target: "maxresults", name: null, rule: false, - chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req) - if err != nil { - return nil, err - } - return resp.(*FilterBlobSegment), err -} - -// filterBlobsPreparer prepares the FilterBlobs request. -func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - if where != nil && len(*where) > 0 { - params.Set("where", *where) - } - if marker != nil && len(*marker) > 0 { - params.Set("marker", *marker) - } - if maxresults != nil { - params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) - } - params.Set("comp", "blobs") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// filterBlobsResponder handles the response to the FilterBlobs request. -func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &FilterBlobSegment{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetAccountInfo returns the sku name and account kind -func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { - req, err := client.getAccountInfoPreparer() - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ServiceGetAccountInfoResponse), err -} - -// getAccountInfoPreparer prepares the GetAccountInfo request. 
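A minimal sketch of the FilterBlobs call above with only the tag expression set; the where-clause syntax shown is the service's SQL-like blob-tag dialect, and both the helper and the example expression are illustrative, not part of this patch.

// blobsTaggedProd is an invented helper that filters blobs by a single tag.
func blobsTaggedProd(ctx context.Context, client serviceClient) (*FilterBlobSegment, error) {
	// Blob tag filters use the service's SQL-like syntax, e.g. "key" = 'value'.
	where := `"env" = 'prod'`
	return client.FilterBlobs(ctx, nil, nil, &where, nil, nil)
}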
-func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - params.Set("restype", "account") - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - return req, nil -} - -// getAccountInfoResponder handles the response to the GetAccountInfo request. -func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err -} - -// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getPropertiesPreparer(timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*StorageServiceProperties), err -} - -// getPropertiesPreparer prepares the GetProperties request. -func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "service") - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getPropertiesResponder handles the response to the GetProperties request. 
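The same calling pattern, sketched for GetProperties. Because these generated methods take every optional scalar as a pointer, call sites typically need an address-of shim; both helpers below are invented for illustration, not part of this patch.

// int32Ptr is an invented helper returning the address of its argument.
func int32Ptr(v int32) *int32 { return &v }

// blobServiceProps fetches the account's Blob service configuration with a
// 30-second server-side timeout and no client request ID.
func blobServiceProps(ctx context.Context, client serviceClient) (*StorageServiceProperties, error) {
	return client.GetProperties(ctx, int32Ptr(30), nil)
}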
-func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &StorageServiceProperties{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the -// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. -// -// timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getStatisticsPreparer(timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) - if err != nil { - return nil, err - } - return resp.(*StorageServiceStats), err -} - -// getStatisticsPreparer prepares the GetStatistics request. -func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "service") - params.Set("comp", "stats") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// getStatisticsResponder handles the response to the GetStatistics request. -func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &StorageServiceStats{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using -// bearer token authentication. -// -// keyInfo is key information timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) { - if err := validate([]validation{ - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req) - if err != nil { - return nil, err - } - return resp.(*UserDelegationKey), err -} - -// getUserDelegationKeyPreparer prepares the GetUserDelegationKey request. -func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("POST", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "service") - params.Set("comp", "userdelegationkey") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - b, err := xml.Marshal(keyInfo) - if err != nil { - return req, pipeline.NewError(err, "failed to marshal request body") - } - req.Header.Set("Content-Type", "application/xml") - err = req.SetBody(bytes.NewReader(b)) - if err != nil { - return req, pipeline.NewError(err, "failed to set request body") - } - return req, nil -} - -// getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request. -func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &UserDelegationKey{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified -// account -// -// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a -// string value that identifies the portion of the list of containers to be returned with the next listing operation. -// The operation returns the NextMarker value within the response body if the listing operation did not return all -// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the -// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the -// client. 
maxresults is specifies the maximum number of containers to return. If the request does not specify -// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the -// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the -// remainder of the results. For this reason, it is possible that the service will return fewer results than specified -// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's -// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For -// more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { - if err := validate([]validation{ - {targetValue: maxresults, - constraints: []constraint{{target: "maxresults", name: null, rule: false, - chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ListContainersSegmentResponse), err -} - -// listContainersSegmentPreparer prepares the ListContainersSegment request. -func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("GET", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if prefix != nil && len(*prefix) > 0 { - params.Set("prefix", *prefix) - } - if marker != nil && len(*marker) > 0 { - params.Set("marker", *marker) - } - if maxresults != nil { - params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) - } - if include != nil && len(include) > 0 { - params.Set("include", joinConst(include, ",")) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "list") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// listContainersSegmentResponder handles the response to the ListContainersSegment request. 
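A minimal sketch of fetching the first segment of the container listing above; the helper is illustrative, and per the doc comment the service caps any single request at 5000 items regardless of maxresults.

// listFirstContainers is an invented helper that requests one listing
// segment filtered by prefix, with no marker, include flags, or timeout.
func listFirstContainers(ctx context.Context, client serviceClient, prefix string) (*ListContainersSegmentResponse, error) {
	maxResults := int32(100)
	return client.ListContainersSegment(ctx, &prefix, nil, &maxResults, nil, nil, nil)
}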
-func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &ListContainersSegmentResponse{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules -// -// storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds. -// For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { - if err := validate([]validation{ - {targetValue: storageServiceProperties, - constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, - chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, - }}, - }}, - {target: "storageServiceProperties.HourMetrics", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, - }}, - }}, - {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, - }}, - }}, - {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, - chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, - }}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) - if err != nil { - return nil, 
err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) - if err != nil { - return nil, err - } - return resp.(*ServiceSetPropertiesResponse), err -} - -// setPropertiesPreparer prepares the SetProperties request. -func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("PUT", client.url, nil) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("restype", "service") - params.Set("comp", "properties") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - b, err := xml.Marshal(storageServiceProperties) - if err != nil { - return req, pipeline.NewError(err, "failed to marshal request body") - } - req.Header.Set("Content-Type", "application/xml") - err = req.SetBody(bytes.NewReader(b)) - if err != nil { - return req, pipeline.NewError(err, "failed to set request body") - } - return req, nil -} - -// setPropertiesResponder handles the response to the SetProperties request. -func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK, http.StatusAccepted) - if resp == nil { - return nil, err - } - io.Copy(ioutil.Discard, resp.Response().Body) - resp.Response().Body.Close() - return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err -} - -// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. -// -// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an -// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be -// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the -// timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { - if err := validate([]validation{ - {targetValue: body, - constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, - {targetValue: timeout, - constraints: []constraint{{target: "timeout", name: null, rule: false, - chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { - return nil, err - } - req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) - if err != nil { - return nil, err - } - resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) - if err != nil { - return nil, err - } - return resp.(*SubmitBatchResponse), err -} - -// submitBatchPreparer prepares the SubmitBatch request. 
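A minimal sketch of the SubmitBatch call above; assembling the multipart/mixed body is the caller's job, and the helper here is illustrative, not part of this patch.

// submitBatchBody is an invented helper. The boundary in the Content-Type
// header must match the one used to frame the sub-requests inside payload.
func submitBatchBody(ctx context.Context, client serviceClient, boundary string, payload []byte) (*SubmitBatchResponse, error) {
	return client.SubmitBatch(ctx, bytes.NewReader(payload), int64(len(payload)), "multipart/mixed; boundary="+boundary, nil, nil)
}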
-func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { - req, err := pipeline.NewRequest("POST", client.url, body) - if err != nil { - return req, pipeline.NewError(err, "failed to create request") - } - params := req.URL.Query() - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "batch") - req.URL.RawQuery = params.Encode() - req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) - req.Header.Set("Content-Type", multipartContentType) - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// submitBatchResponder handles the response to the SubmitBatch request. -func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - return &SubmitBatchResponse{rawResponse: resp.Response()}, err -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go deleted file mode 100644 index 98a2614e606..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go +++ /dev/null @@ -1,367 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "fmt" - "github.com/Azure/azure-pipeline-go/pipeline" - "reflect" - "regexp" - "strings" -) - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type constraint struct { - // Target field name for validation. - target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - rule interface{} - - // Chain validations for struct type - chain []constraint -} - -// Validation stores parameter-wise validation. -type validation struct { - targetValue interface{} - constraints []constraint -} - -// Constraint list -const ( - empty = "Empty" - null = "Null" - readOnly = "ReadOnly" - pattern = "Pattern" - maxLength = "MaxLength" - minLength = "MinLength" - maxItems = "MaxItems" - minItems = "MinItems" - multipleOf = "MultipleOf" - uniqueItems = "UniqueItems" - inclusiveMaximum = "InclusiveMaximum" - exclusiveMaximum = "ExclusiveMaximum" - exclusiveMinimum = "ExclusiveMinimum" - inclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. 
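// Illustrative sketch: how the client methods above drive this generated
// validation machinery. Mirroring the SetProperties call, a nil-able timeout
// is allowed to be nil (name: null, rule: false) but must be >= 0 once set.
// The helper name and parameter are hypothetical.
func exampleValidateTimeout(timeout *int32) error {
	return validate([]validation{
		{targetValue: timeout,
			constraints: []constraint{{target: "timeout", name: null, rule: false,
				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
}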
-func validate(m []validation) error { - for _, item := range m { - v := reflect.ValueOf(item.targetValue) - for _, constraint := range item.constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) - } - err := validate([]validation{ - { - targetValue: getInterfaceValue(f), - constraints: []constraint{v}, - }, - }) - return err -} - -func validatePtr(x reflect.Value, v constraint) error { - if v.name == readOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.chain != nil { - return validate([]validation{ - { - targetValue: getInterfaceValue(x.Elem()), - constraints: v.chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v constraint) error { - i := x.Int() - r, ok := v.rule.(int) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) - } - switch v.name { - case multipleOf: - if i%int64(r) != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) - } - case exclusiveMinimum: - if i <= int64(r) { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case exclusiveMaximum: - if i >= int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case inclusiveMinimum: - if i < int64(r) { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case inclusiveMaximum: - if i > int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) - } - return nil -} - -func validateFloat(x reflect.Value, v constraint) error { - f := x.Float() - r, ok := v.rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) - } - switch v.name { - case exclusiveMinimum: - if f <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case exclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case inclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case inclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) - } - return nil -} - -func 
validateString(x reflect.Value, v constraint) error { - s := x.String() - switch v.name { - case empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case pattern: - reg, err := regexp.Compile(v.rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) - } - case maxLength: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) - } - if len(s) > v.rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) - } - case minLength: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) - } - if len(s) < v.rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) - } - case readOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) - } - if v.chain != nil { - return validate([]validation{ - { - targetValue: getInterfaceValue(x), - constraints: v.chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v constraint) error { - switch v.name { - case null: - if x.IsNil() { - return checkNil(x, v) - } - case empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case maxItems: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) - } - if x.Len() > v.rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) - } - case minItems: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) - } - if x.Len() < v.rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) - } - case uniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) - } - case readOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case pattern: - reg, err := regexp.Compile(v.rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) - } - if v.chain != nil { - return validate([]validation{ - { - targetValue: getInterfaceValue(x), - constraints: v.chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v constraint) error { - if _, ok := v.rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must 
be bool value for %v constraint; got: %v", v.name, v.rule)) - } - if v.rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v constraint) error { - if _, ok := v.rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) - } - if v.rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v constraint, message string) error { - return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.target, v.name, getInterfaceValue(x), message)) -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go deleted file mode 100644 index d7672a1dfd2..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go +++ /dev/null @@ -1,14 +0,0 @@ -package azblob - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/0.0.0 azblob/2020-10-02" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return "0.0.0" -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go deleted file mode 100644 index 45be7e0fdc7..00000000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go +++ /dev/null @@ -1,240 +0,0 @@ -package azblob - -import ( - "context" - "io" - "net/http" - "time" -) - -// BlobHTTPHeaders contains read/writeable blob properties. -type BlobHTTPHeaders struct { - ContentType string - ContentMD5 []byte - ContentEncoding string - ContentLanguage string - ContentDisposition string - CacheControl string -} - -// NewHTTPHeaders returns the user-modifiable properties for this blob. 
-func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders { - return BlobHTTPHeaders{ - ContentType: bgpr.ContentType(), - ContentEncoding: bgpr.ContentEncoding(), - ContentLanguage: bgpr.ContentLanguage(), - ContentDisposition: bgpr.ContentDisposition(), - CacheControl: bgpr.CacheControl(), - ContentMD5: bgpr.ContentMD5(), - } -} - -// ///////////////////////////////////////////////////////////////////////////// - -// NewHTTPHeaders returns the user-modifiable properties for this blob. -func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { - return BlobHTTPHeaders{ - ContentType: dr.ContentType(), - ContentEncoding: dr.ContentEncoding(), - ContentLanguage: dr.ContentLanguage(), - ContentDisposition: dr.ContentDisposition(), - CacheControl: dr.CacheControl(), - ContentMD5: dr.ContentMD5(), - } -} - -// ///////////////////////////////////////////////////////////////////////////// - -// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry. -type DownloadResponse struct { - r *downloadResponse - ctx context.Context - b BlobURL - getInfo HTTPGetterInfo -} - -// Body constructs new RetryReader stream for reading data. If a connection fails -// while reading, it will make additional requests to reestablish a connection and -// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0 -// (the default) returns the original response body and no retries will be performed. -func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { - if o.MaxRetryRequests == 0 { // No additional retries - return r.Response().Body - } - return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, - func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { - resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{ - ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, - }, false, o.ClientProvidedKeyOptions) - if err != nil { - return nil, err - } - return resp.Response(), err - }, - ) -} - -// Response returns the raw HTTP response object. -func (r DownloadResponse) Response() *http.Response { - return r.r.Response() -} - -// NewHTTPHeaders returns the user-modifiable properties for this blob. -func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders { - return r.r.NewHTTPHeaders() -} - -// BlobContentMD5 returns the value for header x-ms-blob-content-md5. -func (r DownloadResponse) BlobContentMD5() []byte { - return r.r.BlobContentMD5() -} - -// ContentMD5 returns the value for header Content-MD5. -func (r DownloadResponse) ContentMD5() []byte { - return r.r.ContentMD5() -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (r DownloadResponse) StatusCode() int { - return r.r.StatusCode() -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (r DownloadResponse) Status() string { - return r.r.Status() -} - -// AcceptRanges returns the value for header Accept-Ranges. -func (r DownloadResponse) AcceptRanges() string { - return r.r.AcceptRanges() -} - -// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. -func (r DownloadResponse) BlobCommittedBlockCount() int32 { - return r.r.BlobCommittedBlockCount() -} - -// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. -func (r DownloadResponse) BlobSequenceNumber() int64 { - return r.r.BlobSequenceNumber() -} - -// BlobType returns the value for header x-ms-blob-type. 
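// Illustrative sketch: using the retrying Body reader described above. The
// blobURL value and retry count are hypothetical; CountToEnd is assumed to be
// this package's "read to end" sentinel, and the io/ioutil imports are assumed.
func drainWithRetries(ctx context.Context, blobURL BlobURL) error {
	dl, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false, ClientProvidedKeyOptions{})
	if err != nil {
		return err
	}
	// MaxRetryRequests > 0 wraps the body in a RetryReader that re-issues the
	// GET (pinned to the original ETag) if the connection drops mid-stream.
	body := dl.Body(RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	_, err = io.Copy(ioutil.Discard, body)
	return err
}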
-func (r DownloadResponse) BlobType() BlobType { - return r.r.BlobType() -} - -// CacheControl returns the value for header Cache-Control. -func (r DownloadResponse) CacheControl() string { - return r.r.CacheControl() -} - -// ContentDisposition returns the value for header Content-Disposition. -func (r DownloadResponse) ContentDisposition() string { - return r.r.ContentDisposition() -} - -// ContentEncoding returns the value for header Content-Encoding. -func (r DownloadResponse) ContentEncoding() string { - return r.r.ContentEncoding() -} - -// ContentLanguage returns the value for header Content-Language. -func (r DownloadResponse) ContentLanguage() string { - return r.r.ContentLanguage() -} - -// ContentLength returns the value for header Content-Length. -func (r DownloadResponse) ContentLength() int64 { - return r.r.ContentLength() -} - -// ContentRange returns the value for header Content-Range. -func (r DownloadResponse) ContentRange() string { - return r.r.ContentRange() -} - -// ContentType returns the value for header Content-Type. -func (r DownloadResponse) ContentType() string { - return r.r.ContentType() -} - -// CopyCompletionTime returns the value for header x-ms-copy-completion-time. -func (r DownloadResponse) CopyCompletionTime() time.Time { - return r.r.CopyCompletionTime() -} - -// CopyID returns the value for header x-ms-copy-id. -func (r DownloadResponse) CopyID() string { - return r.r.CopyID() -} - -// CopyProgress returns the value for header x-ms-copy-progress. -func (r DownloadResponse) CopyProgress() string { - return r.r.CopyProgress() -} - -// CopySource returns the value for header x-ms-copy-source. -func (r DownloadResponse) CopySource() string { - return r.r.CopySource() -} - -// CopyStatus returns the value for header x-ms-copy-status. -func (r DownloadResponse) CopyStatus() CopyStatusType { - return r.r.CopyStatus() -} - -// CopyStatusDescription returns the value for header x-ms-copy-status-description. -func (r DownloadResponse) CopyStatusDescription() string { - return r.r.CopyStatusDescription() -} - -// Date returns the value for header Date. -func (r DownloadResponse) Date() time.Time { - return r.r.Date() -} - -// ETag returns the value for header ETag. -func (r DownloadResponse) ETag() ETag { - return ETag(r.r.ETag()) -} - -// IsServerEncrypted returns the value for header x-ms-server-encrypted. -func (r DownloadResponse) IsServerEncrypted() string { - return r.r.IsServerEncrypted() -} - -// LastModified returns the value for header Last-Modified. -func (r DownloadResponse) LastModified() time.Time { - return r.r.LastModified() -} - -// LeaseDuration returns the value for header x-ms-lease-duration. -func (r DownloadResponse) LeaseDuration() LeaseDurationType { - return r.r.LeaseDuration() -} - -// LeaseState returns the value for header x-ms-lease-state. -func (r DownloadResponse) LeaseState() LeaseStateType { - return r.r.LeaseState() -} - -// LeaseStatus returns the value for header x-ms-lease-status. -func (r DownloadResponse) LeaseStatus() LeaseStatusType { - return r.r.LeaseStatus() -} - -// RequestID returns the value for header x-ms-request-id. -func (r DownloadResponse) RequestID() string { - return r.r.RequestID() -} - -// Version returns the value for header x-ms-version. -func (r DownloadResponse) Version() string { - return r.r.Version() -} - -// NewMetadata returns user-defined key/value pairs. 
-func (r DownloadResponse) NewMetadata() Metadata { - return r.r.NewMetadata() -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/parsing_urls.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/parsing_urls.go deleted file mode 100644 index 671e845ad3e..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/parsing_urls.go +++ /dev/null @@ -1,95 +0,0 @@ -package azqueue - -import ( - "errors" - "net/url" - "strings" -) - -// A QueueURLParts object represents the components that make up an Azure Storage Queue URL. You parse an -// existing URL into its parts by calling NewQueueURLParts(). You construct a URL from parts by calling URL(). -// NOTE: Changing any SAS-related field requires computing a new SAS signature. -type QueueURLParts struct { - Scheme string // Ex: "https://" - Host string // Ex: "account.queue.core.windows.net" - QueueName string // "" if no queue name - Messages bool // true if "/messages" was/should be in URL - MessageID MessageID - SAS SASQueryParameters - UnparsedParams string -} - -// NewQueueURLParts parses a URL initializing QueueURLParts' fields including any SAS-related query parameters. Any other -// query parameters remain in the UnparsedParams field. This method overwrites all fields in the QueueURLParts object. -func NewQueueURLParts(u url.URL) QueueURLParts { - up := QueueURLParts{ - Scheme: u.Scheme, - Host: u.Host, - } - - // Full path example: /queue-name/messages/messageID - // Find the queue name (if any) - if u.Path != "" { - path := u.Path - if path[0] == '/' { - path = path[1:] // If path starts with a slash, remove it - } - - components := strings.Split(path, "/") - if len(components) > 0 { - up.QueueName = components[0] - if len(components) > 1 { - up.Messages = true - if len(components) > 2 { - up.MessageID = MessageID(components[2]) - } - } - } - } - - // Convert the query parameters to a case-sensitive map & trim whitespace - paramsMap := u.Query() - up.SAS = newSASQueryParameters(paramsMap, true) - up.UnparsedParams = paramsMap.Encode() - return up -} - -// URL returns a URL object whose fields are initialized from the QueueURLParts fields. The URL's RawQuery -// field contains the SAS and unparsed query parameters. 
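// Illustrative sketch: round-tripping a queue URL through QueueURLParts as
// described above. The account and queue names are hypothetical; the fmt
// import is assumed.
func exampleParseQueueURL() error {
	raw, err := url.Parse("https://myaccount.queue.core.windows.net/myqueue/messages")
	if err != nil {
		return err
	}
	parts := NewQueueURLParts(*raw) // parts.QueueName == "myqueue", parts.Messages == true
	rebuilt, err := parts.URL()     // reconstructs the same URL from the fields
	if err != nil {
		return err
	}
	fmt.Println(rebuilt.String())
	return nil
}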
-func (up QueueURLParts) URL() (url.URL, error) { - if up.MessageID != "" && !up.Messages || up.QueueName == "" { - return url.URL{}, errors.New("can't produce a URL with a messageID but without a queue name or Messages") - } - if up.MessageID == "" && up.Messages && up.QueueName == "" { - return url.URL{}, errors.New("can't produce a URL with Messages but without a queue name ") - } - - path := "" - // Concatenate queue name (if it exists) - if up.QueueName != "" { - path += "/" + up.QueueName - if up.Messages { - path += "/messages" - } - if up.MessageID != "" { - path += "/" + string(up.MessageID) - } - } - - rawQuery := up.UnparsedParams - - sas := up.SAS.Encode() - if sas != "" { - if len(rawQuery) > 0 { - rawQuery += "&" - } - rawQuery += sas - } - u := url.URL{ - Scheme: up.Scheme, - Host: up.Host, - Path: path, - RawQuery: rawQuery, - } - return u, nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/sas_service.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/sas_service.go deleted file mode 100644 index 29e2a7321c0..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/sas_service.go +++ /dev/null @@ -1,110 +0,0 @@ -package azqueue - -import ( - "fmt" - "strings" - "time" -) - -// QueueSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage queue. -type QueueSASSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to SASVersion - Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a QueueSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Identifier string `param:"si"` - QueueName string -} - -// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce -// the proper SAS query parameters. -func (v QueueSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) SASQueryParameters { - if v.Version == "" { - v.Version = SASVersion - } - startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) - - // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx - stringToSign := strings.Join([]string{ - v.Permissions, - startTime, - expiryTime, - getCanonicalName(sharedKeyCredential.AccountName(), v.QueueName), - v.Identifier, - v.IPRange.String(), - string(v.Protocol), - v.Version}, - "\n") - signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) - - p := SASQueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - - // Queue-specific SAS parameters - resource: "q", - identifier: v.Identifier, - - // Calculated SAS signature - signature: signature, - } - return p -} - -// getCanonicalName computes the canonical name for a queue resource for SAS signing. -func getCanonicalName(account string, queueName string) string { - elements := []string{"/queue/", account, "/", queueName} - return strings.Join(elements, "") -} - -// The QueueSASPermissions type simplifies creating the permissions string for an Azure Storage queue SAS. -// Initialize an instance of this type and then call its String method to set QueueSASSignatureValues's Permissions field. 
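// Illustrative sketch: producing queue SAS query parameters with the values
// type above and the permissions helper that follows. The expiry, queue name,
// and credential are hypothetical.
func exampleQueueSAS(cred *SharedKeyCredential) string {
	perms := QueueSASPermissions{Read: true, Add: true, Update: true, Process: true}.String() // "raup"
	sas := QueueSASSignatureValues{
		ExpiryTime:  time.Now().UTC().Add(4 * time.Hour),
		Permissions: perms,
		QueueName:   "myqueue",
	}.NewSASQueryParameters(cred)
	return sas.Encode() // ready to use as a URL's RawQuery
}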
-type QueueSASPermissions struct { - Read, Add, Update, Process bool -} - -// String produces the SAS permissions string for an Azure Storage queue. -// Call this method to set QueueSASSignatureValues's Permissions field. -func (p QueueSASPermissions) String() string { - var b strings.Builder - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Update { - b.WriteRune('u') - } - if p.Process { - b.WriteRune('p') - } - return b.String() -} - -// Parse initializes the QueueSASPermissions's fields from a string. -func (p *QueueSASPermissions) Parse(s string) error { - *p = QueueSASPermissions{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'u': - p.Update = true - case 'p': - p.Process = true - default: - return fmt.Errorf("Invalid permission: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/service_codes_queue.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/service_codes_queue.go deleted file mode 100644 index 8e0dbcfccae..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/service_codes_queue.go +++ /dev/null @@ -1,33 +0,0 @@ -package azqueue - -// https://docs.microsoft.com/en-us/rest/api/storageservices/queue-service-error-codes - -// ServiceCode values indicate a service failure. -const ( - // The specified marker is invalid (400). - ServiceCodeInvalidMarker ServiceCodeType = "InvalidMarker" - - // The specified message does not exist (404). - ServiceCodeMessageNotFound ServiceCodeType = "MessageNotFound" - - // The message exceeds the maximum allowed size (400). - ServiceCodeMessageTooLarge ServiceCodeType = "MessageTooLarge" - - // The specified pop receipt did not match the pop receipt for a dequeued message (400). - ServiceCodePopReceiptMismatch ServiceCodeType = "PopReceiptMismatch" - - // The specified queue already exists (409). - ServiceCodeQueueAlreadyExists ServiceCodeType = "QueueAlreadyExists" - - // The specified queue is being deleted (409). - ServiceCodeQueueBeingDeleted ServiceCodeType = "QueueBeingDeleted" - - // The specified queue has been disabled by the administrator (409). - ServiceCodeQueueDisabled ServiceCodeType = "QueueDisabled" - - // The specified queue is not empty (409). - ServiceCodeQueueNotEmpty ServiceCodeType = "QueueNotEmpty" - - // The specified queue does not exist (404). - ServiceCodeQueueNotFound ServiceCodeType = "QueueNotFound" -) diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messageid.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messageid.go deleted file mode 100644 index 72d348bade9..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messageid.go +++ /dev/null @@ -1,99 +0,0 @@ -package azqueue - -import ( - "context" - "github.com/Azure/azure-pipeline-go/pipeline" - "net/http" - "net/url" - "time" -) - -// A MessageIDURL represents a URL to a specific Azure Storage Queue message allowing you to manipulate the message. -type MessageIDURL struct { - client messageIDClient -} - -// NewMessageIDURL creates a MessageIDURL object using the specified URL and request policy pipeline. -func NewMessageIDURL(url url.URL, p pipeline.Pipeline) MessageIDURL { - client := newMessageIDClient(url, p) - return MessageIDURL{client: client} -} - -// URL returns the URL endpoint used by the MessageIDURL object. 
-func (m MessageIDURL) URL() url.URL { - return m.client.URL() -} - -// String returns the URL as a string. -func (m MessageIDURL) String() string { - u := m.URL() - return u.String() -} - -// WithPipeline creates a new MessageIDURL object identical to the source but with the specified request policy pipeline. -func (m MessageIDURL) WithPipeline(p pipeline.Pipeline) MessageIDURL { - return NewMessageIDURL(m.URL(), p) -} - -// Delete permanently removes the specified message from its queue. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-message2. -func (m MessageIDURL) Delete(ctx context.Context, popReceipt PopReceipt) (*MessageIDDeleteResponse, error) { - return m.client.Delete(ctx, string(popReceipt), nil, nil) -} - -// Update changes a message's visibility timeout and contents. The message content must be a UTF-8 encoded string that is up to 64KB in size. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/update-message. -func (m MessageIDURL) Update(ctx context.Context, popReceipt PopReceipt, visibilityTimeout time.Duration, message string) (*UpdatedMessageResponse, error) { - r, err := m.client.Update(ctx, QueueMessage{MessageText: message}, string(popReceipt), - int32(visibilityTimeout.Seconds()), nil, nil) - - if err != nil { - return nil, err - } - - return &UpdatedMessageResponse{ - inner: r, - PopReceipt: PopReceipt(r.PopReceipt()), - TimeNextVisible: r.TimeNextVisible(), - }, err -} - -type UpdatedMessageResponse struct { - inner *MessageIDUpdateResponse - - // PopReceipt returns the value for header x-ms-popreceipt. - PopReceipt PopReceipt - - // TimeNextVisible returns the value for header x-ms-time-next-visible. - TimeNextVisible time.Time -} - -// Response returns the raw HTTP response object. -func (miur UpdatedMessageResponse) Response() *http.Response { - return miur.inner.Response() -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (miur UpdatedMessageResponse) StatusCode() int { - return miur.inner.StatusCode() -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (miur UpdatedMessageResponse) Status() string { - return miur.inner.Status() -} - -// Date returns the value for header Date. -func (miur UpdatedMessageResponse) Date() time.Time { - return miur.inner.Date() -} - -// RequestID returns the value for header x-ms-request-id. -func (miur UpdatedMessageResponse) RequestID() string { - return miur.inner.RequestID() -} - -// Version returns the value for header x-ms-version. -func (miur UpdatedMessageResponse) Version() string { - return miur.inner.Version() -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messages.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messages.go deleted file mode 100644 index 9a8c5795520..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_messages.go +++ /dev/null @@ -1,297 +0,0 @@ -package azqueue - -import ( - "context" - "net/url" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" - "net/http" -) - -// MessageID represents a Message ID as a string. -type MessageID string - -// String returns a MessageID as a string -func (id MessageID) String() string { return string(id) } - -/////////////////////////////////////////////////////////////////////////////// - -// PopReceipt represents a Message's opaque pop receipt. 
-type PopReceipt string - -// String returns a PopReceipt as a string -func (pr PopReceipt) String() string { return string(pr) } - -/////////////////////////////////////////////////////////////////////////////// - -// A MessagesURL represents a URL to an Azure Storage Queue's messages allowing you to manipulate its messages. -type MessagesURL struct { - client messagesClient -} - -// NewMessageURL creates a MessagesURL object using the specified URL and request policy pipeline. -func NewMessagesURL(url url.URL, p pipeline.Pipeline) MessagesURL { - client := newMessagesClient(url, p) - return MessagesURL{client: client} -} - -// URL returns the URL endpoint used by the MessagesURL object. -func (m MessagesURL) URL() url.URL { - return m.client.URL() -} - -// String returns the URL as a string. -func (m MessagesURL) String() string { - u := m.URL() - return u.String() -} - -// WithPipeline creates a new MessagesURL object identical to the source but with the specified request policy pipeline. -func (m MessagesURL) WithPipeline(p pipeline.Pipeline) MessagesURL { - return NewMessagesURL(m.URL(), p) -} - -// NewMessageIDURL creates a new MessageIDURL object by concatenating messageID to the end of -// MessagesURL's URL. The new MessageIDURL uses the same request policy pipeline as the MessagesURL. -// To change the pipeline, create the MessageIDURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewMessageIDURL instead of calling this object's -// NewMessageIDURL method. -func (m MessagesURL) NewMessageIDURL(messageID MessageID) MessageIDURL { - messageIDURL := appendToURLPath(m.URL(), messageID.String()) - return NewMessageIDURL(messageIDURL, m.client.Pipeline()) -} - -// Clear deletes all messages from a queue. For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/clear-messages. -func (m MessagesURL) Clear(ctx context.Context) (*MessagesClearResponse, error) { - return m.client.Clear(ctx, nil, nil) -} - -/////////////////////////////////////////////////////////////////////////////// - -// Enqueue adds a new message to the back of a queue. The visibility timeout specifies how long the message should be invisible -// to Dequeue and Peek operations. The message content must be a UTF-8 encoded string that is up to 64KB in size. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-message. -// The timeToLive interval for the message is defined in seconds. The maximum timeToLive can be any positive number, as well as -time.Second indicating that the message does not expire. -// If 0 is passed for timeToLive, the default value is 7 days. 
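// Illustrative sketch of the timeToLive cases described above (messagesURL
// and ctx are hypothetical):
//
//	_, _ = messagesURL.Enqueue(ctx, "hi", 0, 0)            // service default TTL (7 days)
//	_, _ = messagesURL.Enqueue(ctx, "hi", 0, time.Hour)    // expires after one hour
//	_, _ = messagesURL.Enqueue(ctx, "hi", 0, -time.Second) // never expires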
-func (m MessagesURL) Enqueue(ctx context.Context, messageText string, visibilityTimeout time.Duration, timeToLive time.Duration) (*EnqueueMessageResponse, error) { - vt := int32(visibilityTimeout.Seconds()) - - // timeToLive should only be sent if it's not 0 - var ttl *int32 = nil - if timeToLive != 0 { - ttlValue := int32(timeToLive.Seconds()) - ttl = &ttlValue - } - - resp, err := m.client.Enqueue(ctx, QueueMessage{MessageText: messageText}, &vt, ttl, nil, nil) - if err != nil { - return nil, err - } - - item := resp.Items[0] - return &EnqueueMessageResponse{ - inner: resp, - MessageID: MessageID(item.MessageID), - PopReceipt: PopReceipt(item.PopReceipt), - TimeNextVisible: item.TimeNextVisible, - InsertionTime: item.InsertionTime, - ExpirationTime: item.ExpirationTime, - }, nil -} - -// EnqueueMessageResponse holds the results of a successfully-enqueued message. -type EnqueueMessageResponse struct { - inner *EnqueueResponse - - // MessageID returns the service-assigned ID for the enqueued message. - MessageID MessageID - - // PopReceipt returns the service-assigned PopReceipt for the enqueued message. - // You could use this to create a MessageIDURL object. - PopReceipt PopReceipt - - // TimeNextVisible returns the time when the message next becomes visible. - TimeNextVisible time.Time - - // InsertionTime returns the time when the message was enqueued. - InsertionTime time.Time - - // ExpirationTime returns the time when the message will automatically be deleted from the queue. - ExpirationTime time.Time -} - -// Response returns the raw HTTP response object. -func (emr EnqueueMessageResponse) Response() *http.Response { - return emr.inner.Response() -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (emr EnqueueMessageResponse) StatusCode() int { - return emr.inner.StatusCode() -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (emr EnqueueMessageResponse) Status() string { - return emr.inner.Status() -} - -// Date returns the value for header Date. -func (emr EnqueueMessageResponse) Date() time.Time { - return emr.inner.Date() -} - -// RequestID returns the value for header x-ms-request-id. -func (emr EnqueueMessageResponse) RequestID() string { - return emr.inner.RequestID() -} - -// Version returns the value for header x-ms-version. -func (emr EnqueueMessageResponse) Version() string { - return emr.inner.Version() -} - -/////////////////////////////////////////////////////////////////////////////// - -// Dequeue retrieves one or more messages from the front of the queue. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-messages. -func (m MessagesURL) Dequeue(ctx context.Context, maxMessages int32, visibilityTimeout time.Duration) (*DequeuedMessagesResponse, error) { - vt := int32(visibilityTimeout.Seconds()) - qml, err := m.client.Dequeue(ctx, &maxMessages, &vt, nil, nil) - return &DequeuedMessagesResponse{inner: qml}, err -} - -// DequeueMessagesResponse holds the results of a successful call to Dequeue. -type DequeuedMessagesResponse struct { - inner *QueueMessagesList -} - -// Response returns the raw HTTP response object. -func (dmr DequeuedMessagesResponse) Response() *http.Response { - return dmr.inner.Response() -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (dmr DequeuedMessagesResponse) StatusCode() int { - return dmr.inner.StatusCode() -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". 
-func (dmr DequeuedMessagesResponse) Status() string { - return dmr.inner.Status() -} - -// Date returns the value for header Date. -func (dmr DequeuedMessagesResponse) Date() time.Time { - return dmr.inner.Date() -} - -// RequestID returns the value for header x-ms-request-id. -func (dmr DequeuedMessagesResponse) RequestID() string { - return dmr.inner.RequestID() -} - -// Version returns the value for header x-ms-version. -func (dmr DequeuedMessagesResponse) Version() string { - return dmr.inner.Version() -} - -// NumMessages returns the number of messages retrieved by the call to Dequeue. -func (dmr DequeuedMessagesResponse) NumMessages() int32 { - return int32(len(dmr.inner.Items)) -} - -// Message returns the information for dequeued message. -func (dmr DequeuedMessagesResponse) Message(index int32) *DequeuedMessage { - v := dmr.inner.Items[index] - return &DequeuedMessage{ - ID: MessageID(v.MessageID), - InsertionTime: v.InsertionTime, - ExpirationTime: v.ExpirationTime, - PopReceipt: PopReceipt(v.PopReceipt), - NextVisibleTime: v.TimeNextVisible, - Text: v.MessageText, - DequeueCount: v.DequeueCount, - } -} - -// DequeuedMessage holds the properties of a single dequeued message. -type DequeuedMessage struct { - ID MessageID - InsertionTime time.Time - ExpirationTime time.Time - PopReceipt PopReceipt - NextVisibleTime time.Time - DequeueCount int64 - Text string // UTF-8 string -} - -/////////////////////////////////////////////////////////////////////////////// - -// Peek retrieves one or more messages from the front of the queue but does not alter the visibility of the message. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/peek-messages. -func (m MessagesURL) Peek(ctx context.Context, maxMessages int32) (*PeekedMessagesResponse, error) { - pr, err := m.client.Peek(ctx, &maxMessages, nil, nil) - return &PeekedMessagesResponse{inner: pr}, err -} - -// PeekedMessagesResponse holds the results of a successful call to Peek. -type PeekedMessagesResponse struct { - inner *PeekResponse -} - -// Response returns the raw HTTP response object. -func (pmr PeekedMessagesResponse) Response() *http.Response { - return pmr.inner.Response() -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (pmr PeekedMessagesResponse) StatusCode() int { - return pmr.inner.StatusCode() -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (pmr PeekedMessagesResponse) Status() string { - return pmr.inner.Status() -} - -// Date returns the value for header Date. -func (pmr PeekedMessagesResponse) Date() time.Time { - return pmr.inner.Date() -} - -// RequestID returns the value for header x-ms-request-id. -func (pmr PeekedMessagesResponse) RequestID() string { - return pmr.inner.RequestID() -} - -// Version returns the value for header x-ms-version. -func (pmr PeekedMessagesResponse) Version() string { - return pmr.inner.Version() -} - -// NumMessages returns the number of messages retrieved by the call to Peek. -func (pmr PeekedMessagesResponse) NumMessages() int32 { - return int32(len(pmr.inner.Items)) -} - -// Message returns the information for peeked message. -func (pmr PeekedMessagesResponse) Message(index int32) *PeekedMessage { - v := pmr.inner.Items[index] - return &PeekedMessage{ - ID: MessageID(v.MessageID), - InsertionTime: v.InsertionTime, - ExpirationTime: v.ExpirationTime, - Text: v.MessageText, - DequeueCount: v.DequeueCount, - } -} - -// PeekedMessage holds the properties of a peeked message. 
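// Illustrative sketch: a receive, process, delete loop over the helpers
// above. messagesURL and handle are hypothetical; deleting a message requires
// the pop receipt returned by Dequeue.
func exampleDrainQueue(ctx context.Context, messagesURL MessagesURL, handle func(string) error) error {
	resp, err := messagesURL.Dequeue(ctx, QueueMaxMessagesDequeue, 30*time.Second)
	if err != nil {
		return err
	}
	for i := int32(0); i < resp.NumMessages(); i++ {
		msg := resp.Message(i)
		if err := handle(msg.Text); err != nil {
			continue // the message becomes visible again after the timeout
		}
		if _, err := messagesURL.NewMessageIDURL(msg.ID).Delete(ctx, msg.PopReceipt); err != nil {
			return err
		}
	}
	return nil
}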
-type PeekedMessage struct { - ID MessageID - InsertionTime time.Time - ExpirationTime time.Time - DequeueCount int64 - Text string // UTF-8 string -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_queue.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_queue.go deleted file mode 100644 index ebb501a1efd..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_queue.go +++ /dev/null @@ -1,143 +0,0 @@ -package azqueue - -import ( - "context" - "net/url" - "strings" - - "fmt" - "github.com/Azure/azure-pipeline-go/pipeline" -) - -const ( - // QueueMaxMessagesDequeue indicates the maximum number of messages - // you can retrieve with each call to Dequeue (32). - QueueMaxMessagesDequeue = 32 - - // QueueMaxMessagesPeek indicates the maximum number of messages - // you can retrieve with each call to Peek (32). - QueueMaxMessagesPeek = 32 - - // QueueMessageMaxBytes indicates the maximum number of bytes allowed for a message's UTF-8 text. - QueueMessageMaxBytes = 64 * 1024 // 64KB ) - -// A QueueURL represents a URL to the Azure Storage queue. -type QueueURL struct { - client queueClient -} - -// NewQueueURL creates a QueueURL object using the specified URL and request policy pipeline. -func NewQueueURL(url url.URL, p pipeline.Pipeline) QueueURL { - client := newQueueClient(url, p) - return QueueURL{client: client} -} - -// URL returns the URL endpoint used by the QueueURL object. -func (q QueueURL) URL() url.URL { - return q.client.URL() -} - -// String returns the URL as a string. -func (q QueueURL) String() string { - u := q.URL() - return u.String() -} - -// WithPipeline creates a new QueueURL object identical to the source but with the specified request policy pipeline. -func (q QueueURL) WithPipeline(p pipeline.Pipeline) QueueURL { - return NewQueueURL(q.URL(), p) -} - -// NewMessagesURL creates a new MessagesURL object by concatenating "messages" to the end of -// QueueURL's URL. The new MessagesURL uses the same request policy pipeline as the QueueURL. -// To change the pipeline, create the MessagesURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewMessagesURL instead of calling this object's -// NewMessagesURL method. -func (q QueueURL) NewMessagesURL() MessagesURL { - messagesURL := appendToURLPath(q.URL(), "messages") - return NewMessagesURL(messagesURL, q.client.Pipeline()) -} - -// Create creates a queue within a storage account. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/create-queue4. -func (q QueueURL) Create(ctx context.Context, metadata Metadata) (*QueueCreateResponse, error) { - return q.client.Create(ctx, nil, metadata, nil) -} - -// Delete permanently deletes a queue. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/delete-queue3. -func (q QueueURL) Delete(ctx context.Context) (*QueueDeleteResponse, error) { - return q.client.Delete(ctx, nil, nil) -} - -// GetProperties retrieves queue properties and user-defined metadata and properties on the specified queue. -// Metadata is associated with the queue as name-value pairs. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-queue-metadata. -func (q QueueURL) GetProperties(ctx context.Context) (*QueueGetPropertiesResponse, error) { - return q.client.GetProperties(ctx, nil, nil) -} - -// SetMetadata sets user-defined metadata on the specified queue. 
Metadata is associated with the queue as name-value pairs. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-queue-metadata. -func (q QueueURL) SetMetadata(ctx context.Context, metadata Metadata) (*QueueSetMetadataResponse, error) { - return q.client.SetMetadata(ctx, nil, metadata, nil) -} - -// GetAccessPolicy returns details about any stored access policies specified on the queue that may be used with -// Shared Access Signatures. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-queue-acl. -func (q QueueURL) GetAccessPolicy(ctx context.Context) (*SignedIdentifiers, error) { - return q.client.GetAccessPolicy(ctx, nil, nil) -} - -// SetAccessPolicy sets stored access policies for the queue that may be used with Shared Access Signatures. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-queue-acl. -func (q QueueURL) SetAccessPolicy(ctx context.Context, permissions []SignedIdentifier) (*QueueSetAccessPolicyResponse, error) { - return q.client.SetAccessPolicy(ctx, permissions, nil, nil) -} - -// The AccessPolicyPermission type simplifies creating the permissions string for a queue's access policy. -// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. -type AccessPolicyPermission struct { - Read, Add, Update, ProcessMessages bool -} - -// String produces the access policy permission string for an Azure Storage queue. -// Call this method to set AccessPolicy's Permission field. -func (p AccessPolicyPermission) String() string { - var b strings.Builder - if p.Read { - b.WriteRune('r') - } - if p.Add { - b.WriteRune('a') - } - if p.Update { - b.WriteRune('u') - } - if p.ProcessMessages { - b.WriteRune('p') - } - return b.String() -} - -// Parse initializes the AccessPolicyPermission's fields from a string. -func (p *AccessPolicyPermission) Parse(s string) error { - *p = AccessPolicyPermission{} // Clear the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'a': - p.Add = true - case 'u': - p.Update = true - case 'p': - p.ProcessMessages = true - default: - return fmt.Errorf("invalid permission: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_service.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_service.go deleted file mode 100644 index 4734b69b187..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/url_service.go +++ /dev/null @@ -1,135 +0,0 @@ -package azqueue - -import ( - "context" - "github.com/Azure/azure-pipeline-go/pipeline" - "net/url" -) - -// A ServiceURL represents a URL to the Azure Storage Queue service allowing you to manipulate queues. -type ServiceURL struct { - client serviceClient -} - -// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. -func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { - client := newServiceClient(primaryURL, p) - return ServiceURL{client: client} -} - -// URL returns the URL endpoint used by the ServiceURL object. -func (s ServiceURL) URL() url.URL { - return s.client.URL() -} - -// String returns the URL as a string. -func (s ServiceURL) String() string { - u := s.URL() - return u.String() -} - -// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. 
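// Illustrative sketch: the permission helper above feeds the Permission field
// of a stored access policy, e.g.:
//
//	perms := AccessPolicyPermission{Read: true, ProcessMessages: true}.String() // "rp"
//	_, err := queueURL.SetAccessPolicy(ctx, []SignedIdentifier{{ID: "read-process", AccessPolicy: AccessPolicy{Expiry: expiry, Permission: perms}}})
//
// (queueURL, ctx, and expiry are hypothetical; the SignedIdentifier and
// AccessPolicy field names are assumed from the generated models.)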
-func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { - return NewServiceURL(s.URL(), p) -} - -// NewQueueURL creates a new QueueURL object by concatenating queueName to the end of -// ServiceURL's URL. The new QueueURL uses the same request policy pipeline as the ServiceURL. -// To change the pipeline, create the QueueURL and then call its WithPipeline method passing in the -// desired pipeline object. Or, call this package's NewQueueURL instead of calling this object's -// NewQueueURL method. -func (s ServiceURL) NewQueueURL(queueName string) QueueURL { - queueURL := appendToURLPath(s.URL(), queueName) - return NewQueueURL(queueURL, s.client.Pipeline()) -} - -// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required) -func appendToURLPath(u url.URL, name string) url.URL { - // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f" - // When you call url.Parse() this is what you'll get: - // Scheme: "https" - // Opaque: "" - // User: nil - // Host: "ms.com" - // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash - // RawPath: "" - // ForceQuery: false - // RawQuery: "k1=v1&k2=v2" - // Fragment: "f" - if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' { - u.Path += "/" // Append "/" to end before appending name - } - u.Path += name - return u -} - -// ListQueuesSegment returns a single segment of queues starting from the specified Marker. Use an empty -// Marker to start enumeration from the beginning. Queue names are returned in lexicographic order. -// After getting a segment, process it, and then call ListQueuesSegment again (passing the previously-returned -// Marker) to get the next segment. For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/list-queues1. -func (s ServiceURL) ListQueuesSegment(ctx context.Context, marker Marker, o ListQueuesSegmentOptions) (*ListQueuesSegmentResponse, error) { - prefix, include, maxResults := o.pointers() - return s.client.ListQueuesSegment(ctx, prefix, marker.Val, maxResults, - include, nil, nil) -} - -// ListQueuesSegmentOptions defines options available when calling ListQueuesSegment. -type ListQueuesSegmentOptions struct { - Detail ListQueuesSegmentDetails // No IncludeType header is produced if "" - Prefix string // No Prefix header is produced if "" - - // SetMaxResults sets the maximum desired results you want the service to return. - // Note, the service may return fewer results than requested. - // MaxResults=0 means no 'MaxResults' header specified. - MaxResults int32 -} - -func (o *ListQueuesSegmentOptions) pointers() (prefix *string, include ListQueuesIncludeType, maxResults *int32) { - if o.Prefix != "" { - prefix = &o.Prefix // else nil - } - if o.MaxResults != 0 { - maxResults = &o.MaxResults - } - if o.Detail.Metadata { - include = ListQueuesIncludeMetadata - } - return -} - -// ListQueuesSegmentDetails indicates what additional information the service should return with each queue. -type ListQueuesSegmentDetails struct { - // Tells the service whether to return metadata for each queue. - Metadata bool -} - -// slice produces the Include query parameter's value. -func (d *ListQueuesSegmentDetails) slice() []ListQueuesIncludeType { - items := []ListQueuesIncludeType{} - // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! 
- if d.Metadata { - items = append(items, ListQueuesIncludeMetadata) - } - return items -} - -// GetProperties gets the properties of a storage account’s Queue service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-queue-service-properties. -func (s ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) { - return s.client.GetProperties(ctx, nil, nil) -} - -// SetProperties sets properties for a storage account’s Queue service endpoint, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/set-queue-service-properties. -func (s ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) { - return s.client.SetProperties(ctx, properties, nil, nil) -} - -// GetStatistics retrieves statistics related to replication for the Queue service. It is only available on the -// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/get-queue-service-stats. -func (s ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { - return s.client.GetStatistics(ctx, nil, nil) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/version.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/version.go deleted file mode 100644 index 4000e0b537c..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package azqueue - -const serviceLibVersion = "0.3" - diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_anonymous.go deleted file mode 100644 index 8c3645b1555..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_anonymous.go +++ /dev/null @@ -1,55 +0,0 @@ -package azqueue - -import ( - "context" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// Credential represent any credential type; it is used to create a credential policy Factory. -type Credential interface { - pipeline.Factory - credentialMarker() -} - -type credentialFunc pipeline.FactoryFunc - -func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return f(next, po) -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (credentialFunc) credentialMarker() {} - -////////////////////////////// - -// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource -// or for use with Shared Access Signatures (SAS). -func NewAnonymousCredential() Credential { - return anonymousCredentialFactory -} - -var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton - -// anonymousCredentialPolicyFactory is the credential's policy factory. -type anonymousCredentialPolicyFactory struct { -} - -// New creates a credential policy object. 
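// Illustrative sketch: paging through every queue with the Marker-based
// enumeration above. serviceURL and ctx are hypothetical, and the response
// field names (QueueItems, NextMarker) plus Marker.NotDone are assumed from
// the generated models.
func exampleListAllQueues(ctx context.Context, serviceURL ServiceURL) ([]string, error) {
	var names []string
	for marker := (Marker{}); marker.NotDone(); {
		seg, err := serviceURL.ListQueuesSegment(ctx, marker, ListQueuesSegmentOptions{MaxResults: 100})
		if err != nil {
			return nil, err
		}
		for _, q := range seg.QueueItems {
			names = append(names, q.Name)
		}
		marker = seg.NextMarker // empty marker ends the loop
	}
	return names, nil
}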
-func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return &anonymousCredentialPolicy{next: next} -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (*anonymousCredentialPolicyFactory) credentialMarker() {} - -// anonymousCredentialPolicy is the credential's policy object. -type anonymousCredentialPolicy struct { - next pipeline.Policy -} - -// Do implements the credential's policy interface. -func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - // For anonymous credentials, this is effectively a no-op - return p.next.Do(ctx, request) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_shared_key.go deleted file mode 100644 index 7e0cb445353..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_shared_key.go +++ /dev/null @@ -1,196 +0,0 @@ -package azqueue - -import ( - "bytes" - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "errors" - "net/http" - "net/url" - "sort" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the -// storage account's name and either its primary or secondary key. -func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { - bytes, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return &SharedKeyCredential{}, err - } - return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil -} - -// SharedKeyCredential contains an account's name and its primary or secondary key. -// It is immutable making it shareable and goroutine-safe. -type SharedKeyCredential struct { - // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only - accountName string - accountKey []byte -} - -// AccountName returns the Storage account's name. -func (f SharedKeyCredential) AccountName() string { - return f.accountName -} - -// New creates a credential policy object. -func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - // Add a x-ms-date header if it doesn't already exist - if d := request.Header.Get(headerXmsDate); d == "" { - request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} - } - stringToSign, err := f.buildStringToSign(request) - if err != nil { - return nil, err - } - signature := f.ComputeHMACSHA256(stringToSign) - authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") - request.Header[headerAuthorization] = []string{authHeader} - - response, err := next.Do(ctx, request) - if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { - // Service failed to authenticate request, log it - po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") - } - return response, err - }) -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. 
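// Illustrative sketch: wiring the shared-key credential above into a pipeline
// and a ServiceURL. The account name, key, and endpoint are hypothetical, and
// NewPipeline/PipelineOptions are assumed to come from this package.
func exampleServiceURL(accountName, accountKey string) (ServiceURL, error) {
	cred, err := NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return ServiceURL{}, err
	}
	u, err := url.Parse("https://" + accountName + ".queue.core.windows.net")
	if err != nil {
		return ServiceURL{}, err
	}
	// Every request sent through this pipeline is signed by the credential policy.
	return NewServiceURL(*u, NewPipeline(cred, PipelineOptions{})), nil
}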
-func (*SharedKeyCredential) credentialMarker() {} - -// Constants ensuring that header names are correctly spelled and consistently cased. -const ( - headerAuthorization = "Authorization" - headerCacheControl = "Cache-Control" - headerContentEncoding = "Content-Encoding" - headerContentDisposition = "Content-Disposition" - headerContentLanguage = "Content-Language" - headerContentLength = "Content-Length" - headerContentMD5 = "Content-MD5" - headerContentType = "Content-Type" - headerDate = "Date" - headerIfMatch = "If-Match" - headerIfModifiedSince = "If-Modified-Since" - headerIfNoneMatch = "If-None-Match" - headerIfUnmodifiedSince = "If-Unmodified-Since" - headerRange = "Range" - headerUserAgent = "User-Agent" - headerXmsDate = "x-ms-date" - headerXmsVersion = "x-ms-version" -) - -// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. -func (f *SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { - h := hmac.New(sha256.New, f.accountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - headers := request.Header - contentLength := headers.Get(headerContentLength) - if contentLength == "0" { - contentLength = "" - } - - canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) - if err != nil { - return "", err - } - - stringToSign := strings.Join([]string{ - request.Method, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), - contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - "", // Empty date because x-ms-date is expected (as per web page above) - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - return stringToSign, nil -} - -func buildCanonicalizedHeader(headers http.Header) string { - cm := map[string][]string{} - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v // NOTE: the value must not have any whitespace around it. - } - } - if len(cm) == 0 { - return "" - } - - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - sort.Strings(keys) - ch := bytes.NewBufferString("") - for i, key := range keys { - if i > 0 { - ch.WriteRune('\n') - } - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(strings.Join(cm[key], ",")) - } - return string(ch.Bytes()) -} - -func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - cr := bytes.NewBufferString("/") - cr.WriteString(f.accountName) - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. 
- // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
- cr.WriteString(u.EscapedPath())
- } else {
- // a slash is required to indicate the root path
- cr.WriteString("/")
- }
-
- // params is a map[string][]string; the param name is the key; the param values are a []string
- params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
- if err != nil {
- return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
- }
-
- if len(params) > 0 { // There is at least 1 query parameter
- paramNames := []string{} // We use this to sort the parameter key names
- for paramName := range params {
- paramNames = append(paramNames, paramName) // paramNames must be lowercase
- }
- sort.Strings(paramNames)
-
- for _, paramName := range paramNames {
- paramValues := params[paramName]
- sort.Strings(paramValues)
-
- // Join the sorted key values separated by ','
- // Then prepend "keyName:"; then add this string to the buffer
- cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
- }
- }
- return string(cr.Bytes()), nil
-}
diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_token.go
deleted file mode 100644
index 81cf2395122..00000000000
--- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_credential_token.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package azqueue
-
-import (
- "context"
- "errors"
- "sync/atomic"
-
- "runtime"
- "sync"
- "time"
-
- "github.com/Azure/azure-pipeline-go/pipeline"
-)
-
-// TokenRefresher represents a callback method that you write; this method is called periodically
-// so you can refresh the token credential's value.
-type TokenRefresher func(credential TokenCredential) time.Duration
-
-// TokenCredential represents a token credential (which is also a pipeline.Factory).
-type TokenCredential interface {
- Credential
- Token() string
- SetToken(newToken string)
-}
-
-// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
-// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
-// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
-// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
-// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
-// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
-// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a
-// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
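Before moving on to the token credential body below, the canonicalized-resource format produced by buildCanonicalizedResource above is easiest to see end-to-end in a standalone sketch (account name and URL are placeholders):

package main

import (
    "fmt"
    "net/url"
    "sort"
    "strings"
)

// canonicalizedResource mirrors the shape of the string built above:
// "/<account><escaped-path>" followed by one "\n<param>:<v1,v2>" line per
// query parameter, with parameter names and values sorted.
func canonicalizedResource(accountName string, u *url.URL) (string, error) {
    var b strings.Builder
    b.WriteString("/" + accountName)
    if len(u.Path) > 0 {
        b.WriteString(u.EscapedPath())
    } else {
        b.WriteString("/") // a slash is required to indicate the root path
    }
    params, err := url.ParseQuery(u.RawQuery)
    if err != nil {
        return "", err
    }
    names := make([]string, 0, len(params))
    for name := range params {
        names = append(names, name)
    }
    sort.Strings(names)
    for _, name := range names {
        values := params[name]
        sort.Strings(values)
        b.WriteString("\n" + name + ":" + strings.Join(values, ","))
    }
    return b.String(), nil
}

func main() {
    u, _ := url.Parse("https://myaccount.queue.core.windows.net/myqueue?comp=metadata&timeout=30")
    cr, _ := canonicalizedResource("myaccount", u)
    fmt.Printf("%q\n", cr)
}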
-func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential { - tc := &tokenCredential{} - tc.SetToken(initialToken) // We don't set it above to guarantee atomicity - if tokenRefresher == nil { - return tc // If no callback specified, return the simple tokenCredential - } - - tcwr := &tokenCredentialWithRefresh{token: tc} - tcwr.token.startRefresh(tokenRefresher) - runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) { - deadTC.token.stopRefresh() - deadTC.token = nil // Sanity (not really required) - }) - return tcwr -} - -// tokenCredentialWithRefresh is a wrapper over a token credential. -// When this wrapper object gets GC'd, it stops the tokenCredential's timer -// which allows the tokenCredential object to also be GC'd. -type tokenCredentialWithRefresh struct { - token *tokenCredential -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (*tokenCredentialWithRefresh) credentialMarker() {} - -// Token returns the current token value -func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() } - -// SetToken changes the current token value -func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) } - -// New satisfies pipeline.Factory's New method creating a pipeline policy object. -func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return f.token.New(next, po) -} - -/////////////////////////////////////////////////////////////////////////////// - -// tokenCredential is a pipeline.Factory is the credential's policy factory. -type tokenCredential struct { - token atomic.Value - - // The members below are only used if the user specified a tokenRefresher callback function. - timer *time.Timer - tokenRefresher TokenRefresher - lock sync.Mutex - stopped bool -} - -// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. -func (*tokenCredential) credentialMarker() {} - -// Token returns the current token value -func (f *tokenCredential) Token() string { return f.token.Load().(string) } - -// SetToken changes the current token value -func (f *tokenCredential) SetToken(token string) { f.token.Store(token) } - -// startRefresh calls refresh which immediately calls tokenRefresher -// and then starts a timer to call tokenRefresher in the future. -func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) { - f.tokenRefresher = tokenRefresher - f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again - f.refresh() -} - -// refresh calls the user's tokenRefresher so they can refresh the token (by -// calling SetToken) and then starts another time (based on the returned duration) -// in order to refresh the token again in the future. -func (f *tokenCredential) refresh() { - d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock - if d > 0 { // If duration is 0 or negative, refresher wants to not be called again - f.lock.Lock() - if !f.stopped { - f.timer = time.AfterFunc(d, f.refresh) - } - f.lock.Unlock() - } -} - -// stopRefresh stops any pending timer and sets stopped field to true to prevent -// any new timer from starting. -// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object. 
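The start/refresh/stop choreography above (and stopRefresh, next) is a general pattern: a callback that schedules its own next run via time.AfterFunc, guarded by a mutex and a stop flag so a stopped credential never re-arms its timer. A distilled, self-contained version of the same pattern:

package main

import (
    "fmt"
    "sync"
    "time"
)

// refresher re-invokes fn on the schedule fn itself returns,
// the same self-rescheduling time.AfterFunc pattern used above.
type refresher struct {
    mu      sync.Mutex
    timer   *time.Timer
    stopped bool
}

func (r *refresher) start(fn func() time.Duration) {
    var tick func()
    tick = func() {
        d := fn() // run the callback outside the lock
        if d <= 0 {
            return // returning 0 (or less) stops the cycle
        }
        r.mu.Lock()
        if !r.stopped {
            r.timer = time.AfterFunc(d, tick)
        }
        r.mu.Unlock()
    }
    tick() // first invocation happens immediately
}

func (r *refresher) stop() {
    r.mu.Lock()
    r.stopped = true
    if r.timer != nil {
        r.timer.Stop()
    }
    r.mu.Unlock()
}

func main() {
    n := 0
    r := &refresher{}
    r.start(func() time.Duration {
        n++
        fmt.Println("refresh", n)
        if n == 3 {
            return 0
        }
        return 10 * time.Millisecond
    })
    time.Sleep(100 * time.Millisecond)
    r.stop()
}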
-func (f *tokenCredential) stopRefresh() { - f.lock.Lock() - f.stopped = true - if f.timer != nil { - f.timer.Stop() - } - f.lock.Unlock() -} - -// New satisfies pipeline.Factory's New method creating a pipeline policy object. -func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { - return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - if request.URL.Scheme != "https" { - // HTTPS must be used, otherwise the tokens are at the risk of being exposed - return nil, errors.New("token credentials require a URL using the https protocol scheme") - } - request.Header[headerAuthorization] = []string{"Bearer " + f.Token()} - return next.Do(ctx, request) - }) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_pipeline.go deleted file mode 100644 index d20f20e1e62..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_pipeline.go +++ /dev/null @@ -1,43 +0,0 @@ -package azqueue - -import ( - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// PipelineOptions is used to configure a request policy pipeline's retry policy and logging. -type PipelineOptions struct { - // Log configures the pipeline's logging infrastructure indicating what information is logged and where. - Log pipeline.LogOptions - - // Retry configures the built-in retry policy behavior. - Retry RetryOptions - - // RequestLog configures the built-in request logging policy. - RequestLog RequestLogOptions - - // Telemetry configures the built-in telemetry policy behavior. - Telemetry TelemetryOptions -} - -// NewPipeline creates a Pipeline using the specified credentials and options. -func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { - // Closest to API goes first; closest to the wire goes last - f := []pipeline.Factory{ - NewTelemetryPolicyFactory(o.Telemetry), - NewUniqueRequestIDPolicyFactory(), - NewRetryPolicyFactory(o.Retry), - } - - if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { - // For AnonymousCredential, we optimize out the policy factory since it doesn't do anything - // NOTE: The credential's policy factory must appear close to the wire so it can sign any - // changes made by other factories (like UniqueRequestIDPolicyFactory) - f = append(f, c) - } - f = append(f, - NewRequestLogPolicyFactory(o.RequestLog), - pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked - - - return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: nil, Log: o.Log}) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_request_log.go deleted file mode 100644 index d713526f5f9..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_request_log.go +++ /dev/null @@ -1,182 +0,0 @@ -package azqueue - -import ( - "bytes" - "context" - "fmt" - "net/http" - "net/url" - "runtime" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// RequestLogOptions configures the retry policy's behavior. -type RequestLogOptions struct { - // LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified - // duration (-1=no logging; 0=default threshold). 
- LogWarningIfTryOverThreshold time.Duration
-}
-
-func (o RequestLogOptions) defaults() RequestLogOptions {
- if o.LogWarningIfTryOverThreshold == 0 {
- // It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
- // But this monitors the time to get the HTTP response; NOT the time to download the response body.
- o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
- }
- return o
-}
-
-// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
-func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
- o = o.defaults() // Force defaults to be calculated
- return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
- // These variables are per-policy; shared by multiple calls to Do
- var try int32
- operationStart := time.Now() // If this is the 1st try, record the operation start time
- return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
- try++ // The first try is #1 (not #0)
-
- // Log the outgoing request as informational
- if po.ShouldLog(pipeline.LogInfo) {
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
- pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
- po.Log(pipeline.LogInfo, b.String())
- }
-
- // Set the time for this particular retry operation and then Do the operation.
- tryStart := time.Now()
- response, err = next.Do(ctx, request) // Make the request
- tryEnd := time.Now()
- tryDuration := tryEnd.Sub(tryStart)
- opDuration := tryEnd.Sub(operationStart)
-
- logLevel, forceLog := pipeline.LogInfo, false // Default logging information
-
- // If the response took too long, we'll upgrade to warning.
- if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
- // Log a warning if the try duration exceeded the specified threshold
- logLevel, forceLog = pipeline.LogWarning, true
- }
-
- if err == nil { // We got a response from the service
- sc := response.Response().StatusCode
- if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
- logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those excluded above) or any 5xx
- } else {
- // For other status codes, we leave the level as is.
- } - } else { // This error did not get an HTTP response from the service; upgrade the severity to Error - logLevel, forceLog = pipeline.LogError, true - } - - if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { - // We're going to log this; build the string to log - b := &bytes.Buffer{} - slow := "" - if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { - slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) - } - fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) - if err != nil { // This HTTP request did not get a response from the service - fmt.Fprint(b, "REQUEST ERROR\n") - } else { - if logLevel == pipeline.LogError { - fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") - } else { - fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") - } - } - - pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) - if logLevel <= pipeline.LogError { - b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) - } - msg := b.String() - - if forceLog { - pipeline.ForceLog(logLevel, msg) - } - if shouldLog { - po.Log(logLevel, msg) - } - } - return response, err - } - }) -} - -// redactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret. -func redactSigQueryParam(rawQuery string) (bool, string) { - rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= - sigFound := strings.Contains(rawQuery, "?sig=") - if !sigFound { - sigFound = strings.Contains(rawQuery, "&sig=") - if !sigFound { - return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) - } - } - // [?|&]sig= found, redact its value - values, _ := url.ParseQuery(rawQuery) - for name := range values { - if strings.EqualFold(name, "sig") { - values[name] = []string{"REDACTED"} - } - } - return sigFound, values.Encode() -} - -func prepareRequestForLogging(request pipeline.Request) *http.Request { - req := request - if sigFound, rawQuery := redactSigQueryParam(req.URL.RawQuery); sigFound { - // Make copy so we don't destroy the query parameters we actually need to send in the request - req = request.Copy() - req.Request.URL.RawQuery = rawQuery - } - - return prepareRequestForServiceLogging(req) -} - -func stack() []byte { - buf := make([]byte, 1024) - for { - n := runtime.Stack(buf, false) - if n < len(buf) { - return buf[:n] - } - buf = make([]byte, 2*len(buf)) - } -} - -/////////////////////////////////////////////////////////////////////////////////////// -// Redact phase useful for blob and file service only. For other services, -// this method can directly return request.Request. 
-/////////////////////////////////////////////////////////////////////////////////////// -func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { - req := request - if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { - req = request.Copy() - url, err := url.Parse(req.Header.Get(key)) - if err == nil { - if sigFound, rawQuery := redactSigQueryParam(url.RawQuery); sigFound { - url.RawQuery = rawQuery - req.Header.Set(xMsCopySourceHeader, url.String()) - } - } - } - return req.Request -} - -const xMsCopySourceHeader = "x-ms-copy-source" - -func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { - for keyInHeader := range header { - if strings.EqualFold(keyInHeader, key) { - return true, keyInHeader - } - } - return false, "" -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_retry.go deleted file mode 100644 index bf566407cab..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_retry.go +++ /dev/null @@ -1,408 +0,0 @@ -package azqueue - -import ( - "context" - "errors" - "io" - "io/ioutil" - "math/rand" - "net" - "net/http" - "strconv" - "strings" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. -type RetryPolicy int32 - -const ( - // RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy - RetryPolicyExponential RetryPolicy = 0 - - // RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy - RetryPolicyFixed RetryPolicy = 1 -) - -// RetryOptions configures the retry policy's behavior. -type RetryOptions struct { - // Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.\ - // A value of zero means that you accept our default policy. - Policy RetryPolicy - - // MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default). - // A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries. - MaxTries int32 - - // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. - // A value of zero means that you accept our default timeout. NOTE: When transferring large amounts - // of data, the default TryTimeout will probably not be sufficient. You should override this value - // based on the bandwidth available to the host machine and proximity to the Storage service. A good - // starting point may be something like (60 seconds per MB of anticipated-payload-size). - TryTimeout time.Duration - - // RetryDelay specifies the amount of delay to use before retrying an operation (0=default). - // When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially - // with each retry up to a maximum specified by MaxRetryDelay. - // If you specify 0, then you must also specify 0 for MaxRetryDelay. - // If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be - // equal to or greater than RetryDelay. - RetryDelay time.Duration - - // MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default). - // If you specify 0, then you must also specify 0 for RetryDelay. 
- MaxRetryDelay time.Duration - - // RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host. - // If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host. - // NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent - // data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs - RetryReadsFromSecondaryHost string -} - -func (o RetryOptions) retryReadsFromSecondaryHost() string { - return o.RetryReadsFromSecondaryHost -} - -func (o RetryOptions) defaults() RetryOptions { - // We assume the following: - // 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed - // 2. o.MaxTries >= 0 - // 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0 - // 4. o.RetryDelay <= o.MaxRetryDelay - // 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0 - - IfDefault := func(current *time.Duration, desired time.Duration) { - if *current == time.Duration(0) { - *current = desired - } - } - - // Set defaults if unspecified - if o.MaxTries == 0 { - o.MaxTries = 4 - } - switch o.Policy { - case RetryPolicyExponential: - IfDefault(&o.TryTimeout, 1*time.Minute) - IfDefault(&o.RetryDelay, 4*time.Second) - IfDefault(&o.MaxRetryDelay, 120*time.Second) - - case RetryPolicyFixed: - IfDefault(&o.TryTimeout, 1*time.Minute) - IfDefault(&o.RetryDelay, 30*time.Second) - IfDefault(&o.MaxRetryDelay, 120*time.Second) - } - return o -} - -func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0 - pow := func(number int64, exponent int32) int64 { // pow is nested helper function - var result int64 = 1 - for n := int32(0); n < exponent; n++ { - result *= number - } - return result - } - - delay := time.Duration(0) - switch o.Policy { - case RetryPolicyExponential: - delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay - - case RetryPolicyFixed: - if try > 1 { // Any try after the 1st uses the fixed delay - delay = o.RetryDelay - } - } - - // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) - delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand - if delay > o.MaxRetryDelay { - delay = o.MaxRetryDelay - } - return delay -} - -// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options. -func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { - o = o.defaults() // Force defaults to be calculated - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { - // Before each try, we'll select either the primary or secondary URL. - primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC - - // We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use - considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" - - // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) - // When to retry: connection failure or temporary/timeout. 
NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
- // If using a secondary:
- // Even tries go against primary; odd tries go against the secondary
- // For a primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2)
- // If secondary gets a 404, don't fail, retry but future retries are only against the primary
- // When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
- for try := int32(1); try <= o.MaxTries; try++ {
- logf("\n=====> Try=%d\n", try)
-
- // Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
- tryingPrimary := !considerSecondary || (try%2 == 1)
- // Select the correct host and delay
- if tryingPrimary {
- primaryTry++
- delay := o.calcDelay(primaryTry)
- logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
- time.Sleep(delay) // The 1st try returns 0 delay
- } else {
- delay := time.Second * time.Duration(rand.Float32()/2+0.8)
- logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
- time.Sleep(delay) // Delay with some jitter before trying secondary
- }
-
- // Clone the original request to ensure that each try starts with the original (unmutated) request.
- requestCopy := request.Copy()
-
- // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
- // the stream may not be at offset 0 when we first get it and we want the same behavior for the
- // 1st try as for additional tries.
- err = requestCopy.RewindBody()
- if err != nil {
- return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
- }
-
- if !tryingPrimary {
- requestCopy.Request.URL.Host = o.retryReadsFromSecondaryHost()
- }
-
- // Set the server-side timeout query parameter "timeout=[seconds]"
- timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
- if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
- t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
- logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
- if t < timeout {
- timeout = t
- }
- if timeout < 0 {
- timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
- }
- logf("TryTimeout adjusted to=%d sec\n", timeout)
- }
- q := requestCopy.Request.URL.Query()
- q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
- requestCopy.Request.URL.RawQuery = q.Encode()
- logf("Url=%s\n", requestCopy.Request.URL.String())
-
- // Set the time for this particular retry operation and then Do the operation.
- tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
- //requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
- response, err = next.Do(tryCtx, requestCopy) // Make the request
- /*err = improveDeadlineExceeded(err)
- if err == nil {
- response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
- }*/
- logf("Err=%v, response=%v\n", err, response)
-
- action := "" // This MUST get changed within the switch code below
- switch {
- case ctx.Err() != nil:
- action = "NoRetry: Op timeout"
- case !tryingPrimary && response != nil && response.Response().StatusCode == http.StatusNotFound:
- // If the attempt was against the secondary & it returned a StatusNotFound (404), then
- // the resource was not found. This may be due to replication delay. So, in this
- // case, we'll never try the secondary again for this operation.
- considerSecondary = false
- action = "Retry: Secondary URL returned 404"
- case err != nil:
- // NOTE: The protocol responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
- // Use ServiceCode to verify whether the error is related to the storage service side;
- // ServiceCode is set only when an error related to the storage service happened.
- if stErr, ok := err.(StorageError); ok {
- if stErr.Temporary() {
- action = "Retry: StorageError with error service code and Temporary()"
- } else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove this after the protocol layer fixes the issue that net.Error is wrapped as storageError
- action = "Retry: StorageError with success status code"
- } else {
- action = "NoRetry: StorageError not Temporary() and without retriable status code"
- }
- } else if netErr, ok := err.(net.Error); ok {
- // Use a non-retriable net.Error list rather than a retriable list.
- // There are errors without a Temporary() implementation that still need to be retried,
- // like 'connection reset by peer' and 'transport connection broken'.
- // So the SDK retries in most cases, unless the error should definitely not be retried.
- if !isNotRetriable(netErr) {
- action = "Retry: net.Error and not in the non-retriable list"
- } else {
- action = "NoRetry: net.Error and in the non-retriable list"
- }
- } else {
- action = "NoRetry: unrecognized error"
- }
- default:
- action = "NoRetry: successful HTTP request" // no error
- }
-
- logf("Action=%s\n", action)
- // fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
- if action[0] != 'R' { // Retry only if action starts with 'R'
- if err != nil {
- tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
- } else {
- // We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
- // So, when the user closes the Body, our per-try context gets closed too.
- // Another option is to have the last policy do this wrapping for a per-retry context (not for the user's context).
- if response == nil || response.Response() == nil {
- // We error out when response or response.Response() is nil because, for the client,
- // the response should not be nil if the request was sent and the operation executed successfully.
- // Another option is to execute the cancel function when response or response.Response() is nil,
- // since in that case the current per-try context has nothing left to do.
- return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
- }
- response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
- }
- break // Don't retry
- }
- if response != nil && response.Response() != nil && response.Response().Body != nil {
- // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
- body := response.Response().Body
- io.Copy(ioutil.Discard, body)
- body.Close()
- }
- // If retrying, cancel the current per-try timeout context
- tryCancel()
- }
- return response, err // Not retryable or too many retries; return the last response/error
- }
- })
-}
-
-// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
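Stepping back from the loop above before the helper types that follow: the delay schedule driving those primary-side sleeps is worth seeing in isolation. A self-contained sketch of calcDelay's arithmetic for the exponential policy, including the [0.8, 1.3) jitter factor and the MaxRetryDelay cap:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// backoff reproduces the delay schedule used by the retry policy above:
// exponential growth (2^(try-1) - 1) scaled by retryDelay, multiplied by
// jitter in [0.8, 1.3), and capped at maxDelay.
func backoff(try int32, retryDelay, maxDelay time.Duration) time.Duration {
    pow := int64(1)
    for n := int32(1); n < try; n++ {
        pow *= 2
    }
    delay := time.Duration(pow-1) * retryDelay // try 1 gets zero delay
    delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second))
    if delay > maxDelay {
        delay = maxDelay
    }
    return delay
}

func main() {
    // Defaults from RetryOptions.defaults(): 4s delay, 120s cap.
    for try := int32(1); try <= 5; try++ {
        fmt.Printf("try %d: %v\n", try, backoff(try, 4*time.Second, 120*time.Second))
    }
}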
-type contextCancelReadCloser struct { - cf context.CancelFunc - body io.ReadCloser -} - -func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { - return rc.body.Read(p) -} - -func (rc *contextCancelReadCloser) Close() error { - err := rc.body.Close() - if rc.cf != nil { - rc.cf() - } - return err -} - -// isNotRetriable checks if the provided net.Error isn't retriable. -func isNotRetriable(errToParse net.Error) bool { - // No error, so this is NOT retriable. - if errToParse == nil { - return true - } - - // The error is either temporary or a timeout so it IS retriable (not not retriable). - if errToParse.Temporary() || errToParse.Timeout() { - return false - } - - genericErr := error(errToParse) - - // From here all the error are neither Temporary() nor Timeout(). - switch err := errToParse.(type) { - case *net.OpError: - // The net.Error is also a net.OpError but the inner error is nil, so this is not retriable. - if err.Err == nil { - return true - } - genericErr = err.Err - } - - switch genericErr.(type) { - case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError: - // If the error is one of the ones listed, then it is NOT retriable. - return true - } - - // If it's invalid header field name/value error thrown by http module, then it is NOT retriable. - // This could happen when metadata's key or value is invalid. (RoundTrip in transport.go) - if strings.Contains(genericErr.Error(), "invalid header field") { - return true - } - - // Assume the error is retriable. - return false -} - -var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} - -func isSuccessStatusCode(resp *http.Response) bool { - if resp == nil { - return false - } - for _, i := range successStatusCodes { - if i == resp.StatusCode { - return true - } - } - return false -} - -// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away -var logf = func(format string, a ...interface{}) {} - -// Use this version to see the retry method's code path (import "fmt") -//var logf = fmt.Printf - -/* -type deadlineExceededReadCloser struct { - r io.ReadCloser -} - -func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { - n, err := 0, io.EOF - if r.r != nil { - n, err = r.r.Read(p) - } - return n, improveDeadlineExceeded(err) -} -func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { - // For an HTTP request, the ReadCloser MUST also implement seek - // For an HTTP response, Seek MUST not be called (or this will panic) - o, err := r.r.(io.Seeker).Seek(offset, whence) - return o, improveDeadlineExceeded(err) -} -func (r *deadlineExceededReadCloser) Close() error { - if c, ok := r.r.(io.Closer); ok { - c.Close() - } - return nil -} - -// timeoutError is the internal struct that implements our richer timeout error. -type deadlineExceeded struct { - responseError -} - -var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time - -// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. -func improveDeadlineExceeded(cause error) error { - // If cause is not DeadlineExceeded, return the same error passed in. 
- if cause != context.DeadlineExceeded { - return cause - } - // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message - return &deadlineExceeded{ - responseError: responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - }, - } -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *deadlineExceeded) Error() string { - return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") -} -*/ diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_telemetry.go deleted file mode 100644 index 61dab3bce60..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_telemetry.go +++ /dev/null @@ -1,51 +0,0 @@ -package azqueue - -import ( - "bytes" - "context" - "fmt" - "os" - "runtime" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// TelemetryOptions configures the telemetry policy's behavior. -type TelemetryOptions struct { - // Value is a string prepended to each request's User-Agent and sent to the service. - // The service records the user-agent in logs for diagnostics and tracking of client requests. - Value string -} - -// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects -// which add telemetry information to outgoing HTTP requests. -func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { - b := &bytes.Buffer{} - b.WriteString(o.Value) - if b.Len() > 0 { - b.WriteRune(' ') - } - fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) - telemetryValue := b.String() - - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - request.Header.Set("User-Agent", telemetryValue) - return next.Do(ctx, request) - } - }) -} - -// NOTE: the ONLY function that should write to this variable is this func -var platformInfo = func() string { - // Azure-Storage/version (runtime; os type and version)” - // Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)' - operatingSystem := runtime.GOOS // Default OS string - switch operatingSystem { - case "windows": - operatingSystem = os.Getenv("OS") // Get more specific OS information - case "linux": // accept default OS info - case "freebsd": // accept default OS info - } - return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) -}() diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_unique_request_id.go deleted file mode 100644 index db371086bad..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_policy_unique_request_id.go +++ /dev/null @@ -1,24 +0,0 @@ -package azqueue - -import ( - "context" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object -// that sets the request's x-ms-client-request-id header if it doesn't already exist. 
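Before the request-ID factory body below: the telemetry value assembled above has a fixed shape, a caller-supplied prefix followed by library and platform information. A runnable sketch of the same assembly (the library version is a placeholder; the deleted version.go pinned serviceLibVersion at "0.3"):

package main

import (
    "fmt"
    "runtime"
)

// userAgent builds a telemetry string of the same shape as the policy above:
// an optional caller prefix, then "Azure-Storage/<lib version> (<go version>; <os>)".
func userAgent(callerValue, libVersion string) string {
    prefix := callerValue
    if prefix != "" {
        prefix += " "
    }
    return fmt.Sprintf("%sAzure-Storage/%s (%s; %s)", prefix, libVersion, runtime.Version(), runtime.GOOS)
}

func main() {
    fmt.Println(userAgent("my-app/1.0", "0.3"))
}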
-func NewUniqueRequestIDPolicyFactory() pipeline.Factory { - return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - // This is Policy's Do method: - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - id := request.Header.Get(xMsClientRequestID) - if id == "" { // Add a unique request ID if the caller didn't specify one already - request.Header.Set(xMsClientRequestID, newUUID().String()) - } - return next.Do(ctx, request) - } - }) -} - -const xMsClientRequestID = "x-ms-client-request-id" diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_account.go deleted file mode 100644 index f44ea5ff66a..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_account.go +++ /dev/null @@ -1,218 +0,0 @@ -package azqueue - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" -) - -// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. -// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas -type AccountSASSignatureValues struct { - Version string `param:"sv"` // If not specified, this defaults to SASVersion - Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() - IPRange IPRange `param:"sip"` - Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() - ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() -} - -// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce -// the proper SAS query parameters. 
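Looking back at the request-ID policy above: newUUID is internal to azqueue, so here is a hypothetical stand-in that rolls a random version-4 UUID by hand and applies the same "set only if absent" rule:

package main

import (
    "crypto/rand"
    "fmt"
    "net/http"
)

// setClientRequestID adds an x-ms-client-request-id header when the caller
// hasn't set one, mirroring the unique-request-ID policy above.
func setClientRequestID(h http.Header) {
    if h.Get("x-ms-client-request-id") != "" {
        return
    }
    var b [16]byte
    if _, err := rand.Read(b[:]); err != nil {
        return // leave the header unset if no randomness is available
    }
    b[6] = (b[6] & 0x0f) | 0x40 // version 4
    b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
    h.Set("x-ms-client-request-id",
        fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]))
}

func main() {
    h := http.Header{}
    setClientRequestID(h)
    fmt.Println(h.Get("x-ms-client-request-id"))
}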
-func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS - if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { - return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") - } - if v.Version == "" { - v.Version = SASVersion - } - perms := &AccountSASPermissions{} - if err := perms.Parse(v.Permissions); err != nil { - return SASQueryParameters{}, err - } - v.Permissions = perms.String() - - startTime, expiryTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime) - - stringToSign := strings.Join([]string{ - sharedKeyCredential.AccountName(), - v.Permissions, - v.Services, - v.ResourceTypes, - startTime, - expiryTime, - v.IPRange.String(), - string(v.Protocol), - v.Version, - ""}, // That right, the account SAS requires a terminating extra newline - "\n") - - signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) - p := SASQueryParameters{ - // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, - - // Account-specific SAS parameters - services: v.Services, - resourceTypes: v.ResourceTypes, - - // Calculated SAS signature - signature: signature, - } - return p, nil -} - -// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. -type AccountSASPermissions struct { - Read, Write, Delete, List, Add, Create, Update, Process bool -} - -// String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Permissions field. -func (p AccountSASPermissions) String() string { - var buffer bytes.Buffer - if p.Read { - buffer.WriteRune('r') - } - if p.Write { - buffer.WriteRune('w') - } - if p.Delete { - buffer.WriteRune('d') - } - if p.List { - buffer.WriteRune('l') - } - if p.Add { - buffer.WriteRune('a') - } - if p.Create { - buffer.WriteRune('c') - } - if p.Update { - buffer.WriteRune('u') - } - if p.Process { - buffer.WriteRune('p') - } - return buffer.String() -} - -// Parse initializes the AccountSASPermissions's fields from a string. -func (p *AccountSASPermissions) Parse(s string) error { - *p = AccountSASPermissions{} // Clear out the flags - for _, r := range s { - switch r { - case 'r': - p.Read = true - case 'w': - p.Write = true - case 'd': - p.Delete = true - case 'l': - p.List = true - case 'a': - p.Add = true - case 'c': - p.Create = true - case 'u': - p.Update = true - case 'p': - p.Process = true - default: - return fmt.Errorf("Invalid permission character: '%v'", r) - } - } - return nil -} - -// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. -type AccountSASServices struct { - Blob, Queue, File bool -} - -// String produces the SAS services string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's Services field. 
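Putting the pieces of this file together, here is a hypothetical end-to-end use of the account-SAS types being removed, assuming the pre-removal azqueue API exactly as it appears in this diff (account name and key are placeholders):

package main

import (
    "fmt"
    "time"

    "github.com/Azure/azure-storage-queue-go/azqueue"
)

func main() {
    // Hypothetical account and base64-encoded key, for illustration only.
    cred, err := azqueue.NewSharedKeyCredential("myaccount", "bXlrZXk=")
    if err != nil {
        panic(err)
    }
    qp, err := azqueue.AccountSASSignatureValues{
        Protocol:      azqueue.SASProtocolHTTPS,
        ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
        Permissions:   azqueue.AccountSASPermissions{Read: true, Add: true, Process: true}.String(),
        Services:      azqueue.AccountSASServices{Queue: true}.String(),
        ResourceTypes: azqueue.AccountSASResourceTypes{Service: true, Container: true, Object: true}.String(),
    }.NewSASQueryParameters(cred)
    if err != nil {
        panic(err)
    }
    fmt.Println(qp.Encode()) // append this to the account's queue endpoint URL
}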
-func (s AccountSASServices) String() string { - var buffer bytes.Buffer - if s.Blob { - buffer.WriteRune('b') - } - if s.Queue { - buffer.WriteRune('q') - } - if s.File { - buffer.WriteRune('f') - } - return buffer.String() -} - -// Parse initializes the AccountSASServices' fields from a string. -func (a *AccountSASServices) Parse(s string) error { - *a = AccountSASServices{} // Clear out the flags - for _, r := range s { - switch r { - case 'b': - a.Blob = true - case 'q': - a.Queue = true - case 'f': - a.File = true - default: - return fmt.Errorf("Invalid service character: '%v'", r) - } - } - return nil -} - -// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. -type AccountSASResourceTypes struct { - Service, Container, Object bool -} - -// String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues's ResourceTypes field. -func (rt AccountSASResourceTypes) String() string { - var buffer bytes.Buffer - if rt.Service { - buffer.WriteRune('s') - } - if rt.Container { - buffer.WriteRune('c') - } - if rt.Object { - buffer.WriteRune('o') - } - return buffer.String() -} - -// Parse initializes the AccountSASResourceType's fields from a string. -func (rt *AccountSASResourceTypes) Parse(s string) error { - *rt = AccountSASResourceTypes{} // Clear out the flags - for _, r := range s { - switch r { - case 's': - rt.Service = true - case 'c': - rt.Container = true - case 'o': - rt.Object = true - default: - return fmt.Errorf("Invalid resource type: '%v'", r) - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_query_params.go deleted file mode 100644 index 5c14a5fe4b0..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_sas_query_params.go +++ /dev/null @@ -1,211 +0,0 @@ -package azqueue - -import ( - "net" - "net/url" - "strings" - "time" -) - -// SASVersion indicates the SAS version. -const SASVersion = ServiceVersion - -type SASProtocol string - -const ( - // SASProtocolHTTPS can be specified for a SAS protocol - SASProtocolHTTPS SASProtocol = "https" - - // SASProtocolHTTPSandHTTP can be specified for a SAS protocol - SASProtocolHTTPSandHTTP SASProtocol = "https,http" -) - -// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a -// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). -func FormatTimesForSASSigning(startTime, expiryTime time.Time) (string, string) { - ss := "" - if !startTime.IsZero() { - ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" - } - se := "" - if !expiryTime.IsZero() { - se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" - } - return ss, se -} - -// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. -const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 - -// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas - -// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters. -// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). 
You add the components -// to a query parameter map by calling AddToValues(). -// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. -// -// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). -type SASQueryParameters struct { - // All members are immutable or values so copies of this struct are goroutine-safe. - version string `param:"sv"` - services string `param:"ss"` - resourceTypes string `param:"srt"` - protocol SASProtocol `param:"spr"` - startTime time.Time `param:"st"` - expiryTime time.Time `param:"se"` - ipRange IPRange `param:"sip"` - identifier string `param:"si"` - resource string `param:"sr"` - permissions string `param:"sp"` - signature string `param:"sig"` -} - -func (p *SASQueryParameters) Version() string { - return p.version -} - -func (p *SASQueryParameters) Services() string { - return p.services -} -func (p *SASQueryParameters) ResourceTypes() string { - return p.resourceTypes -} -func (p *SASQueryParameters) Protocol() SASProtocol { - return p.protocol -} -func (p *SASQueryParameters) StartTime() time.Time { - return p.startTime -} -func (p *SASQueryParameters) ExpiryTime() time.Time { - return p.expiryTime -} - -func (p *SASQueryParameters) IPRange() IPRange { - return p.ipRange -} - -func (p *SASQueryParameters) Identifier() string { - return p.identifier -} - -func (p *SASQueryParameters) Resource() string { - return p.resource -} -func (p *SASQueryParameters) Permissions() string { - return p.permissions -} - -func (p *SASQueryParameters) Signature() string { - return p.signature -} - -// IPRange represents a SAS IP range's start IP and (optionally) end IP. -type IPRange struct { - Start net.IP // Not specified if length = 0 - End net.IP // Not specified if length = 0 -} - -// String returns a string representation of an IPRange. -func (ipr *IPRange) String() string { - if len(ipr.Start) == 0 { - return "" - } - start := ipr.Start.String() - if len(ipr.End) == 0 { - return start - } - return start + "-" + ipr.End.String() -} - -// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the -// query parameter map's passed-in values. If deleteSASParametersFromValues is true, -// all SAS-related query parameters are removed from the passed-in map. If -// deleteSASParametersFromValues is false, the map passed-in map is unaltered. -func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { - p := SASQueryParameters{} - for k, v := range values { - val := v[0] - isSASKey := true - switch strings.ToLower(k) { - case "sv": - p.version = val - case "ss": - p.services = val - case "srt": - p.resourceTypes = val - case "spr": - p.protocol = SASProtocol(val) - case "st": - p.startTime, _ = time.Parse(SASTimeFormat, val) - case "se": - p.expiryTime, _ = time.Parse(SASTimeFormat, val) - case "sip": - dashIndex := strings.Index(val, "-") - if dashIndex == -1 { - p.ipRange.Start = net.ParseIP(val) - } else { - p.ipRange.Start = net.ParseIP(val[:dashIndex]) - p.ipRange.End = net.ParseIP(val[dashIndex+1:]) - } - case "si": - p.identifier = val - case "sr": - p.resource = val - case "sp": - p.permissions = val - case "sig": - p.signature = val - default: - isSASKey = false // We didn't recognize the query parameter - } - if isSASKey && deleteSASParametersFromValues { - delete(values, k) - } - } - return p -} - -// AddToValues adds the SAS components to the specified query parameters map. 
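Two parsing details of newSASQueryParameters above are easy to miss: SAS times use a fixed ISO 8601 layout, and "sip" can be either a single IP or a "start-end" range. A minimal sketch of just those two steps (the query string is illustrative):

package main

import (
    "fmt"
    "net"
    "net/url"
    "strings"
    "time"
)

// parseSASTimesAndIP mirrors the st/se/sip handling in newSASQueryParameters above.
func parseSASTimesAndIP(values url.Values) (start, expiry time.Time, ipStart, ipEnd net.IP) {
    const sasTimeFormat = "2006-01-02T15:04:05Z"
    start, _ = time.Parse(sasTimeFormat, values.Get("st"))
    expiry, _ = time.Parse(sasTimeFormat, values.Get("se"))
    if sip := values.Get("sip"); sip != "" {
        if dash := strings.Index(sip, "-"); dash == -1 {
            ipStart = net.ParseIP(sip)
        } else {
            ipStart = net.ParseIP(sip[:dash])
            ipEnd = net.ParseIP(sip[dash+1:])
        }
    }
    return
}

func main() {
    v, _ := url.ParseQuery("st=2017-07-27T00:00:00Z&se=2017-07-28T00:00:00Z&sip=168.1.5.60-168.1.5.70")
    st, se, a, b := parseSASTimesAndIP(v)
    fmt.Println(st, se, a, b)
}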
-func (p *SASQueryParameters) addToValues(v url.Values) url.Values { - if p.version != "" { - v.Add("sv", p.version) - } - if p.services != "" { - v.Add("ss", p.services) - } - if p.resourceTypes != "" { - v.Add("srt", p.resourceTypes) - } - if p.protocol != "" { - v.Add("spr", string(p.protocol)) - } - if !p.startTime.IsZero() { - v.Add("st", p.startTime.Format(SASTimeFormat)) - } - if !p.expiryTime.IsZero() { - v.Add("se", p.expiryTime.Format(SASTimeFormat)) - } - if len(p.ipRange.Start) > 0 { - v.Add("sip", p.ipRange.String()) - } - if p.identifier != "" { - v.Add("si", p.identifier) - } - if p.resource != "" { - v.Add("sr", p.resource) - } - if p.permissions != "" { - v.Add("sp", p.permissions) - } - if p.signature != "" { - v.Add("sig", p.signature) - } - return v -} - -// Encode encodes the SAS query parameters into URL encoded form sorted by key. -func (p *SASQueryParameters) Encode() string { - v := url.Values{} - p.addToValues(v) - return v.Encode() -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_service_codes_common.go deleted file mode 100644 index e0c4fd85714..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_service_codes_common.go +++ /dev/null @@ -1,131 +0,0 @@ -package azqueue - -// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes - -const ( - // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. - ServiceCodeNone ServiceCodeType = "" - - // ServiceCodeAccountAlreadyExists means the specified account already exists. - ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" - - // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). - ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" - - // ServiceCodeAccountIsDisabled means the specified account is disabled (403). - ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" - - // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). - ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" - - // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). - ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" - - // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). - ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" - - // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). - ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" - - // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). - ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" - - // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). 
- ServiceCodeInternalError ServiceCodeType = "InternalError" - - // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). - ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" - - // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). - ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" - - // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). - ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" - - // ServiceCodeInvalidInput means one of the request inputs is not valid (400). - ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" - - // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). - ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" - - // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). - ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" - - // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). - ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" - - // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). - ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" - - // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). - ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" - - // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). - ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" - - // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). - ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" - - // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). - ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" - - // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). - ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" - - // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). - ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" - - // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). - ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" - - // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). - ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" - - // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). - ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" - - // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). 
- ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" - - // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). - ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" - - // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). - ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" - - // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). - ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" - - // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). - ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" - - // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). - ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" - - // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). - ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" - - // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). - ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" - - // ServiceCodeResourceAlreadyExists means the specified resource already exists (409). - ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" - - // ServiceCodeResourceNotFound means the specified resource does not exist (404). - ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" - - // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). - ServiceCodeServerBusy ServiceCodeType = "ServerBusy" - - // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). - ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" - - // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). - ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" - - // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). - ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" - - // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). - ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" -) diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_storage_error.go deleted file mode 100644 index e276eb33b7c..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_storage_error.go +++ /dev/null @@ -1,111 +0,0 @@ -package azqueue - -import ( - "bytes" - "encoding/xml" - "fmt" - "net/http" - "sort" - - "github.com/Azure/azure-pipeline-go/pipeline" -) - -func init() { - // wire up our custom error handling constructor - responseErrorFactory = newStorageError -} - -// ServiceCodeType is a string identifying a storage service error. 
-// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2 -type ServiceCodeType string - -// StorageError identifies a responder-generated network or response parsing error. -type StorageError interface { - // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). - ResponseError - - // ServiceCode returns a service error code. Your code can use this to make error recovery decisions. - ServiceCode() ServiceCodeType -} - -// storageError is the internal struct that implements the public StorageError interface. -type storageError struct { - responseError - serviceCode ServiceCodeType - details map[string]string -} - -// newStorageError creates an error object that implements the error interface. -func newStorageError(cause error, response *http.Response, description string) error { - return &storageError{ - responseError: responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - response: response, - description: description, - }, - serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), - } -} - -// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. -func (e *storageError) ServiceCode() ServiceCodeType { - return e.serviceCode -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *storageError) Error() string { - b := &bytes.Buffer{} - fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) - fmt.Fprintf(b, "Description=%s, Details: ", e.description) - if len(e.details) == 0 { - b.WriteString("(none)\n") - } else { - b.WriteRune('\n') - keys := make([]string, 0, len(e.details)) - // Alphabetize the details - for k := range e.details { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) - } - } - req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request - pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) - return e.ErrorNode.Error(b.String()) -} - -// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). -func (e *storageError) Temporary() bool { - if e.response != nil { - if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { - return true - } - } - return e.ErrorNode.Temporary() -} - -// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. 
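ServiceCode is the hook callers use for error-recovery decisions, as the comment above says. A hedged sketch of typical usage follows, relying only on codes defined in this file; classify is a hypothetical helper, not part of the package.

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-queue-go/azqueue"
)

// classify maps a storage error to a suggested recovery action based on
// its service code. Non-storage errors fall through unclassified.
func classify(err error) string {
	serr, ok := err.(azqueue.StorageError)
	if !ok {
		return "not a storage error"
	}
	switch serr.ServiceCode() {
	case azqueue.ServiceCodeResourceNotFound:
		return "create the resource, then retry"
	case azqueue.ServiceCodeServerBusy, azqueue.ServiceCodeOperationTimedOut:
		return "back off and retry"
	default:
		return fmt.Sprintf("unhandled service code %q", serr.ServiceCode())
	}
}

func main() { fmt.Println(classify(nil)) }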
-func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { - tokName := "" - var t xml.Token - for t, err = d.Token(); err == nil; t, err = d.Token() { - switch tt := t.(type) { - case xml.StartElement: - tokName = tt.Name.Local - break - case xml.CharData: - switch tokName { - case "Message": - e.description = string(tt) - default: - if e.details == nil { - e.details = map[string]string{} - } - e.details[tokName] = string(tt) - } - } - } - return nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_uuid.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_uuid.go deleted file mode 100644 index 06e1652bbae..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zc_uuid.go +++ /dev/null @@ -1,77 +0,0 @@ -package azqueue - -import ( - "crypto/rand" - "fmt" - "strconv" -) - -// The UUID reserved variants. -const ( - reservedNCS byte = 0x80 - reservedRFC4122 byte = 0x40 - reservedMicrosoft byte = 0x20 - reservedFuture byte = 0x00 -) - -// A UUID representation compliant with specification in RFC 4122 document. -type uuid [16]byte - -// NewUUID returns a new uuid using RFC 4122 algorithm. -func newUUID() (u uuid) { - u = uuid{} - // Set all bits to randomly (or pseudo-randomly) chosen values. - rand.Read(u[:]) - u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) - - var version byte = 4 - u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) - return -} - -// String returns an unparsed version of the generated UUID sequence. -func (u uuid) String() string { - return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f" -// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID. -func parseUUID(uuidStr string) uuid { - char := func(hexString string) byte { - i, _ := strconv.ParseUint(hexString, 16, 8) - return byte(i) - } - if uuidStr[0] == '{' { - uuidStr = uuidStr[1:] // Skip over the '{' - } - // 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f - // 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33 - // 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45 - uuidVal := uuid{ - char(uuidStr[0:2]), - char(uuidStr[2:4]), - char(uuidStr[4:6]), - char(uuidStr[6:8]), - - char(uuidStr[9:11]), - char(uuidStr[11:13]), - - char(uuidStr[14:16]), - char(uuidStr[16:18]), - - char(uuidStr[19:21]), - char(uuidStr[21:23]), - - char(uuidStr[24:26]), - char(uuidStr[26:28]), - char(uuidStr[28:30]), - char(uuidStr[30:32]), - char(uuidStr[32:34]), - char(uuidStr[34:36]), - } - return uuidVal -} - -func (u uuid) bytes() []byte { - return u[:] -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_doc.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_doc.go deleted file mode 100644 index 5985a952ac6..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_doc.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018 Microsoft Corporation. All rights reserved. -// Use of this source code is governed by an MIT -// license that can be found in the LICENSE file. - -/* -Package azqueue allows you to manipulate Azure Storage queues and their messages. - -URL Types - -The most common types you'll work with are the XxxURL types. The methods of these types make requests -against the Azure Storage Service. - - - ServiceURL's methods perform operations on a storage account. - - QueueURL's methods perform operations on an account's queue. 
- - MessagesURL's methods perform operations with a queue's messages. - -Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP -request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed. -The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. - -Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass -an initial pipeline. When you call ServiceURL's NewQueueURL method, the new QueueURL object has its own -URL but it shares the same pipeline as the parent ServiceURL object. -To work with a queue's messages, call QueueURL's NewMessagesURL method. - -If you'd like to use a different pipeline with a ServiceURL, QueueURL, or MessagesURL object, then call the XxxURL -object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object -with the same URL as the original but with the specified pipeline. - -Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that -XxxURL objects share a lot of system resources making them very efficient. - -All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures, -transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an -example of how to do deal with errors. - -URL and Shared Access Signature Manipulation - -The library includes a QueueURLParts type for deconstructing and reconstructing URLs. And you can use the following types -for generating and parsing Shared Access Signature (SAS) - - Use the AccountSASSignatureValues type to create a SAS for a storage account. - - Use the QueueSASSignatureValues type to create a SAS for a queue. - - Use the SASQueryParameters type to examine SAS query parameres. - -To generate a SAS, you must use the SharedKeyCredential type. - -Credentials - -When creating a request pipeline, you must specify one of this package's credential types. - - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS). - - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this - to generate Shared Access Signatures. - -HTTP Request Policy Factories - -This package defines several request policy factories for use with the pipeline package. -Most applications will not use these factories directly; instead, the NewPipeline -function creates these factories, initializes them (via the PipelineOptions type) -and returns a pipeline object for use by the XxxURL objects. - -However, for advanced scenarios, developers can access these policy factories directly -and even create their own and then construct their own pipeline in order to affect HTTP -requests and responses performed by the XxxURL objects. For example, developers can -introduce their own logging, random failures, request recording & playback for fast -testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The -possibilities are endless! - -Below are the request pipeline policy factory functions that are provided with this -package: - - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests. 
- - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures. - - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests. - - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures. - -Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. -*/ -package azqueue - -// TokenCredential Use this to access resources using Role-Based Access Control (RBAC). diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_url_service_test.goX b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_url_service_test.goX deleted file mode 100644 index 00235776660..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zt_url_service_test.goX +++ /dev/null @@ -1,132 +0,0 @@ -package azqueue_test - -/* -import ( - "context" - "fmt" - "net/url" - "os" - "testing" - - chk "gopkg.in/check.v1" // go get gopkg.in/check.v1 -) - -// Hook up gocheck to testing -func Test(t *testing.T) { chk.TestingT(t) } - -type StorageAccountSuite struct{} - -var _ = chk.Suite(&StorageAccountSuite{}) - -func getStorageAccount(c *chk.C) ServiceURL { - name := os.Getenv("ACCOUNT_NAME") - key := os.Getenv("ACCOUNT_KEY") - if name == "" || key == "" { - panic("ACCOUNT_NAME and ACCOUNT_KEY environment vars must be set before running tests") - } - u, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", name)) - c.Assert(err, chk.IsNil) - - credential := NewSharedKeyCredential(name, key) - pipeline := NewPipeline(credential, PipelineOptions{}) - return NewServiceURL(*u, pipeline) -} - -/*func (s *StorageAccountSuite) TestGetSetProperties(c *chk.C) { - sa := getStorageAccount(c) - setProps := StorageServiceProperties{} - resp, err := sa.SetProperties(context.Background(), setProps) - c.Assert(err, chk.IsNil) - c.Assert(resp.Response().StatusCode, chk.Equals, 202) - c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") - c.Assert(resp.Version(), chk.Not(chk.Equals), "") - - props, err := sa.GetProperties(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(props.Response().StatusCode, chk.Equals, 200) - c.Assert(props.RequestID(), chk.Not(chk.Equals), "") - c.Assert(props.Version(), chk.Not(chk.Equals), "") - c.Assert(props.Logging, chk.NotNil) - c.Assert(props.HourMetrics, chk.NotNil) - c.Assert(props.MinuteMetrics, chk.NotNil) - c.Assert(props.Cors, chk.HasLen, 0) - c.Assert(props.DefaultServiceVersion, chk.IsNil) // TODO: this seems like a bug -} - -func (s *StorageAccountSuite) TestGetStatus(c *chk.C) { - sa := getStorageAccount(c) - if !strings.Contains(sa.URL().Path, "-secondary") { - c.Skip("only applicable on secondary storage accounts") - } - stats, err := sa.GetStats(context.Background()) - c.Assert(err, chk.IsNil) - c.Assert(stats, chk.NotNil) -}*/ - -/* -func (s *StorageAccountSuite) TestListContainers(c *chk.C) { - sa := getStorageAccount(c) - resp, err := sa.ListContainers(context.Background(), Marker{}, ListContainersOptions{Prefix: containerPrefix}) - c.Assert(err, chk.IsNil) - c.Assert(resp.Response().StatusCode, chk.Equals, 200) - c.Assert(resp.RequestID(), chk.Not(chk.Equals), "") - c.Assert(resp.Version(), chk.Not(chk.Equals), "") - c.Assert(resp.Containers, chk.HasLen, 0) - c.Assert(resp.ServiceEndpoint, chk.NotNil) - - container := getContainer(c) - defer 
delContainer(c, container) - - md := Metadata{ - "foo": "foovalue", - "bar": "barvalue", - } - _, err = container.SetMetadata(context.Background(), md, ContainerAccessConditions{}) - c.Assert(err, chk.IsNil) - - resp, err = sa.ListContainers(context.Background(), Marker{}, ListContainersOptions{Detail: ListContainersDetail{Metadata: true}, Prefix: containerPrefix}) - c.Assert(err, chk.IsNil) - c.Assert(resp.Containers, chk.HasLen, 1) - c.Assert(resp.Containers[0].Name, chk.NotNil) - c.Assert(resp.Containers[0].Properties, chk.NotNil) - c.Assert(resp.Containers[0].Properties.LastModified, chk.NotNil) - c.Assert(resp.Containers[0].Properties.Etag, chk.NotNil) - c.Assert(resp.Containers[0].Properties.LeaseStatus, chk.Equals, LeaseStatusUnlocked) - c.Assert(resp.Containers[0].Properties.LeaseState, chk.Equals, LeaseStateAvailable) - c.Assert(string(resp.Containers[0].Properties.LeaseDuration), chk.Equals, "") - c.Assert(string(resp.Containers[0].Properties.PublicAccess), chk.Equals, "") - c.Assert(resp.Containers[0].Metadata, chk.DeepEquals, md) -} - -func (s *StorageAccountSuite) TestListContainersPaged(c *chk.C) { - sa := getStorageAccount(c) - - const numContainers = 4 - const maxResultsPerPage = 2 - const pagedContainersPrefix = "azblobspagedtest" - - containers := make([]ContainerURL, numContainers) - for i := 0; i < numContainers; i++ { - containers[i] = getContainerWithPrefix(c, pagedContainersPrefix) - } - - defer func() { - for i := range containers { - delContainer(c, containers[i]) - } - }() - - marker := Marker{} - iterations := numContainers / maxResultsPerPage - - for i := 0; i < iterations; i++ { - resp, err := sa.ListContainers(context.Background(), marker, ListContainersOptions{MaxResults: maxResultsPerPage, Prefix: pagedContainersPrefix}) - c.Assert(err, chk.IsNil) - c.Assert(resp.Containers, chk.HasLen, maxResultsPerPage) - - hasMore := i < iterations-1 - c.Assert(resp.NextMarker.NotDone(), chk.Equals, hasMore) - marker = resp.NextMarker - } -} -*/ \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_client.go deleted file mode 100644 index c90d024efbc..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_client.go +++ /dev/null @@ -1,38 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "github.com/Azure/azure-pipeline-go/pipeline" - "net/url" -) - -const ( - // ServiceVersion specifies the version of the operations used in this package. - ServiceVersion = "2018-03-28" -) - -// managementClient is the base client for Azqueue. -type managementClient struct { - url url.URL - p pipeline.Pipeline -} - -// newManagementClient creates an instance of the managementClient client. -func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { - return managementClient{ - url: url, - p: p, - } -} - -// URL returns a copy of the URL for this client. -func (mc managementClient) URL() url.URL { - return mc.url -} - -// Pipeline returns the pipeline for this client. 
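The URL-type plumbing described in the package documentation above, end to end. A sketch with placeholder account and queue names: the credential call mirrors the disabled test above, which returns a single value in this vendored revision (later releases return credential plus error), and Enqueue's signature of message text, visibility timeout, and TTL is assumed from this package's samples.

package main

import (
	"context"
	"log"
	"net/url"
	"time"

	"github.com/Azure/azure-storage-queue-go/azqueue"
)

func main() {
	// Build the pipeline once; XxxURL objects share it.
	credential := azqueue.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	p := azqueue.NewPipeline(credential, azqueue.PipelineOptions{})

	u, err := url.Parse("https://myaccount.queue.core.windows.net")
	if err != nil {
		log.Fatal(err)
	}

	// Each child URL keeps its parent's pipeline, per the doc comment.
	serviceURL := azqueue.NewServiceURL(*u, p)
	messagesURL := serviceURL.NewQueueURL("tasks").NewMessagesURL()

	// Zero visibility timeout, one-minute TTL (signature assumed, see above).
	if _, err := messagesURL.Enqueue(context.Background(), "hello", 0, time.Minute); err != nil {
		log.Fatal(err)
	}
}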
-func (mc managementClient) Pipeline() pipeline.Pipeline { - return mc.p -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_message_id.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_message_id.go deleted file mode 100644 index fea20986ace..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_message_id.go +++ /dev/null @@ -1,157 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// messageIDClient is the client for the MessageID methods of the Azqueue service. -type messageIDClient struct { - managementClient -} - -// newMessageIDClient creates an instance of the messageIDClient client. -func newMessageIDClient(url url.URL, p pipeline.Pipeline) messageIDClient { - return messageIDClient{newManagementClient(url, p)} -} - -// Delete the Delete operation deletes the specified message. -// -// popReceipt is required. Specifies the valid pop receipt value returned from an earlier call to the Get Messages or -// Update Message operation. timeout is the The timeout parameter is expressed in seconds. For more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// Enqueue the Enqueue operation adds a new message to the back of the message queue. A visibility timeout can also be -// specified to make the message invisible until the visibility timeout expires. A message must be in a format that can -// be included in an XML request with UTF-8 encoding. The encoded message can be up to 64 KB in size for versions -// 2011-08-18 and newer, or 8 KB in size for previous versions. -// -// queueMessage is a Message object which can be stored in a Queue visibilitytimeout is optional. Specifies the new -// visibility timeout value, in seconds, relative to server time. The default value is 30 seconds. A specified value -// must be larger than or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol -// versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value later than the -// expiry time. messageTimeToLive is optional. Specifies the time-to-live interval for the message, in seconds. Prior -// to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version 2017-07-29 or later, the maximum -// time-to-live can be any positive number, as well as -1 indicating that the message does not expire. If this -// parameter is omitted, the default time-to-live is 7 days. timeout is the The timeout parameter is expressed in -// seconds. For more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// Peek the Peek operation retrieves one or more messages from the front of the queue, but does not alter the -// visibility of the message. -// -// numberOfMessages is optional. A nonzero integer value that specifies the number of messages to retrieve from the -// queue, up to a maximum of 32. 
If fewer are visible, the visible messages are returned. By default, a single message -// is retrieved from the queue with this operation. timeout is the The timeout parameter is expressed in seconds. For -// more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_models.go deleted file mode 100644 index a925b8273b7..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_models.go +++ /dev/null @@ -1,1348 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "encoding/xml" - "errors" - "net/http" - "reflect" - "strconv" - "strings" - "time" - "unsafe" -) - -// Metadata contains metadata key/value pairs. -type Metadata map[string]string - -const mdPrefix = "x-ms-meta-" - -const mdPrefixLen = len(mdPrefix) - -// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. -func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { - switch tt := t.(type) { - case xml.StartElement: - tokName = strings.ToLower(tt.Name.Local) - break - case xml.CharData: - if *md == nil { - *md = Metadata{} - } - (*md)[tokName] = string(tt) - break - } - } - return nil -} - -// Marker represents an opaque value used in paged responses. -type Marker struct { - Val *string -} - -// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true -// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from -// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only -// after the service has returned the final result portion. -func (m Marker) NotDone() bool { - return m.Val == nil || *m.Val != "" -} - -// UnmarshalXML implements the xml.Unmarshaler interface for Marker. -func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var out string - err := d.DecodeElement(&out, &start) - m.Val = &out - return err -} - -// concatenates a slice of const values with the specified separator between each item -func joinConst(s interface{}, sep string) string { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - panic("s wasn't a slice or array") - } - ss := make([]string, 0, v.Len()) - for i := 0; i < v.Len(); i++ { - ss = append(ss, v.Index(i).String()) - } - return strings.Join(ss, sep) -} - -func validateError(err error) { - if err != nil { - panic(err) - } -} - -// GeoReplicationStatusType enumerates the values for geo replication status type. -type GeoReplicationStatusType string - -const ( - // GeoReplicationStatusBootstrap ... - GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" - // GeoReplicationStatusLive ... - GeoReplicationStatusLive GeoReplicationStatusType = "live" - // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. - GeoReplicationStatusNone GeoReplicationStatusType = "" - // GeoReplicationStatusUnavailable ... 
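The Marker contract above is easiest to read as a paging loop: a zero-value Marker means "make the initial request", and iteration stops once the service returns an empty NextMarker. A sketch, assuming ServiceURL.ListQueuesSegment and its options struct from this vendored API (neither appears in this excerpt); only QueueItems, Name, and NextMarker are taken from the types in this file.

package example

import (
	"context"

	"github.com/Azure/azure-storage-queue-go/azqueue"
)

// listAllQueues pages through every queue name in the account.
func listAllQueues(ctx context.Context, serviceURL azqueue.ServiceURL) ([]string, error) {
	var names []string
	for marker := (azqueue.Marker{}); marker.NotDone(); {
		resp, err := serviceURL.ListQueuesSegment(ctx, marker, azqueue.ListQueuesSegmentOptions{})
		if err != nil {
			return nil, err
		}
		for _, q := range resp.QueueItems {
			names = append(names, q.Name)
		}
		marker = resp.NextMarker // empty on the final page, ending the loop
	}
	return names, nil
}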
- GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" -) - -// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. -func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { - return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} -} - -// ListQueuesIncludeType enumerates the values for list queues include type. -type ListQueuesIncludeType string - -const ( - // ListQueuesIncludeMetadata ... - ListQueuesIncludeMetadata ListQueuesIncludeType = "metadata" - // ListQueuesIncludeNone represents an empty ListQueuesIncludeType. - ListQueuesIncludeNone ListQueuesIncludeType = "" -) - -// PossibleListQueuesIncludeTypeValues returns an array of possible values for the ListQueuesIncludeType const type. -func PossibleListQueuesIncludeTypeValues() []ListQueuesIncludeType { - return []ListQueuesIncludeType{ListQueuesIncludeMetadata, ListQueuesIncludeNone} -} - -// StorageErrorCodeType enumerates the values for storage error code type. -type StorageErrorCodeType string - -const ( - // StorageErrorCodeAccountAlreadyExists ... - StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" - // StorageErrorCodeAccountBeingCreated ... - StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" - // StorageErrorCodeAccountIsDisabled ... - StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" - // StorageErrorCodeAuthenticationFailed ... - StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" - // StorageErrorCodeConditionHeadersNotSupported ... - StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" - // StorageErrorCodeConditionNotMet ... - StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" - // StorageErrorCodeEmptyMetadataKey ... - StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" - // StorageErrorCodeInsufficientAccountPermissions ... - StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" - // StorageErrorCodeInternalError ... - StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" - // StorageErrorCodeInvalidAuthenticationInfo ... - StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" - // StorageErrorCodeInvalidHeaderValue ... - StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" - // StorageErrorCodeInvalidHTTPVerb ... - StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" - // StorageErrorCodeInvalidInput ... - StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" - // StorageErrorCodeInvalidMarker ... - StorageErrorCodeInvalidMarker StorageErrorCodeType = "InvalidMarker" - // StorageErrorCodeInvalidMd5 ... - StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" - // StorageErrorCodeInvalidMetadata ... - StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" - // StorageErrorCodeInvalidQueryParameterValue ... - StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" - // StorageErrorCodeInvalidRange ... - StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" - // StorageErrorCodeInvalidResourceName ... 
- StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" - // StorageErrorCodeInvalidURI ... - StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" - // StorageErrorCodeInvalidXMLDocument ... - StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" - // StorageErrorCodeInvalidXMLNodeValue ... - StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" - // StorageErrorCodeMd5Mismatch ... - StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" - // StorageErrorCodeMessageNotFound ... - StorageErrorCodeMessageNotFound StorageErrorCodeType = "MessageNotFound" - // StorageErrorCodeMessageTooLarge ... - StorageErrorCodeMessageTooLarge StorageErrorCodeType = "MessageTooLarge" - // StorageErrorCodeMetadataTooLarge ... - StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" - // StorageErrorCodeMissingContentLengthHeader ... - StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" - // StorageErrorCodeMissingRequiredHeader ... - StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" - // StorageErrorCodeMissingRequiredQueryParameter ... - StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" - // StorageErrorCodeMissingRequiredXMLNode ... - StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" - // StorageErrorCodeMultipleConditionHeadersNotSupported ... - StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" - // StorageErrorCodeNone represents an empty StorageErrorCodeType. - StorageErrorCodeNone StorageErrorCodeType = "" - // StorageErrorCodeOperationTimedOut ... - StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" - // StorageErrorCodeOutOfRangeInput ... - StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" - // StorageErrorCodeOutOfRangeQueryParameterValue ... - StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" - // StorageErrorCodePopReceiptMismatch ... - StorageErrorCodePopReceiptMismatch StorageErrorCodeType = "PopReceiptMismatch" - // StorageErrorCodeQueueAlreadyExists ... - StorageErrorCodeQueueAlreadyExists StorageErrorCodeType = "QueueAlreadyExists" - // StorageErrorCodeQueueBeingDeleted ... - StorageErrorCodeQueueBeingDeleted StorageErrorCodeType = "QueueBeingDeleted" - // StorageErrorCodeQueueDisabled ... - StorageErrorCodeQueueDisabled StorageErrorCodeType = "QueueDisabled" - // StorageErrorCodeQueueNotEmpty ... - StorageErrorCodeQueueNotEmpty StorageErrorCodeType = "QueueNotEmpty" - // StorageErrorCodeQueueNotFound ... - StorageErrorCodeQueueNotFound StorageErrorCodeType = "QueueNotFound" - // StorageErrorCodeRequestBodyTooLarge ... - StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" - // StorageErrorCodeRequestURLFailedToParse ... - StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" - // StorageErrorCodeResourceAlreadyExists ... - StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" - // StorageErrorCodeResourceNotFound ... - StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" - // StorageErrorCodeResourceTypeMismatch ... - StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" - // StorageErrorCodeServerBusy ... 
- StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" - // StorageErrorCodeUnsupportedHeader ... - StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" - // StorageErrorCodeUnsupportedHTTPVerb ... - StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" - // StorageErrorCodeUnsupportedQueryParameter ... - StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" - // StorageErrorCodeUnsupportedXMLNode ... - StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" -) - -// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. -func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { - return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAuthenticationFailed, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMarker, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeMd5Mismatch, StorageErrorCodeMessageNotFound, StorageErrorCodeMessageTooLarge, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePopReceiptMismatch, StorageErrorCodeQueueAlreadyExists, StorageErrorCodeQueueBeingDeleted, StorageErrorCodeQueueDisabled, StorageErrorCodeQueueNotEmpty, StorageErrorCodeQueueNotFound, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeServerBusy, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} -} - -// AccessPolicy - An Access policy -type AccessPolicy struct { - // Start - the date-time the policy is active - Start time.Time `xml:"Start"` - // Expiry - the date-time the policy expires - Expiry time.Time `xml:"Expiry"` - // Permission - the permissions for the acl policy - Permission string `xml:"Permission"` -} - -// MarshalXML implements the xml.Marshaler interface for AccessPolicy. -func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) - return e.EncodeElement(*ap2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. 
-func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - ap2 := (*accessPolicy)(unsafe.Pointer(ap)) - return d.DecodeElement(ap2, &start) -} - -// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access -// resources in another domain. Web browsers implement a security restriction known as same-origin policy that -// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain -// (the origin domain) to call APIs in another domain -type CorsRule struct { - // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS. - AllowedOrigins string `xml:"AllowedOrigins"` - // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) - AllowedMethods string `xml:"AllowedMethods"` - // AllowedHeaders - the request headers that the origin domain may specify on the CORS request. - AllowedHeaders string `xml:"AllowedHeaders"` - // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer - ExposedHeaders string `xml:"ExposedHeaders"` - // MaxAgeInSeconds - The maximum amount time that a browser should cache the preflight OPTIONS request. - MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"` -} - -// DequeuedMessageItem - The object returned in the QueueMessageList array when calling Get Messages on a -// Queue. -type DequeuedMessageItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessage"` - // MessageID - The Id of the Message. - MessageID string `xml:"MessageId"` - // InsertionTime - The time the Message was inserted into the Queue. - InsertionTime time.Time `xml:"InsertionTime"` - // ExpirationTime - The time that the Message will expire and be automatically deleted. - ExpirationTime time.Time `xml:"ExpirationTime"` - // PopReceipt - This value is required to delete the Message. If deletion fails using this popreceipt then the message has been dequeued by another client. - PopReceipt string `xml:"PopReceipt"` - // TimeNextVisible - The time that the message will again become visible in the Queue. - TimeNextVisible time.Time `xml:"TimeNextVisible"` - // DequeueCount - The number of times the message has been dequeued. - DequeueCount int64 `xml:"DequeueCount"` - // MessageText - The content of the Message. - MessageText string `xml:"MessageText"` -} - -// MarshalXML implements the xml.Marshaler interface for DequeuedMessageItem. -func (dmi DequeuedMessageItem) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - dmi2 := (*dequeuedMessageItem)(unsafe.Pointer(&dmi)) - return e.EncodeElement(*dmi2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for DequeuedMessageItem. 
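The PopReceipt comment on DequeuedMessageItem above encodes the queue's at-least-once contract: a delete with a stale receipt fails because the visibility timeout lapsed and another client re-dequeued the message. A sketch over the fields defined above; del stands in for the message-delete call and is not a function in this package.

package example

import (
	"log"

	"github.com/Azure/azure-storage-queue-go/azqueue"
)

// process handles a batch of dequeued messages, deleting each one by its
// pop receipt and tolerating receipts that have gone stale in the meantime.
func process(items []azqueue.DequeuedMessageItem, del func(messageID, popReceipt string) error) {
	for _, m := range items {
		log.Printf("processing %s (dequeued %d times): %s", m.MessageID, m.DequeueCount, m.MessageText)
		if err := del(m.MessageID, m.PopReceipt); err != nil {
			// Stale pop receipt: another client owns the message now.
			log.Printf("delete failed for %s: %v", m.MessageID, err)
		}
	}
}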
-func (dmi *DequeuedMessageItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - dmi2 := (*dequeuedMessageItem)(unsafe.Pointer(dmi)) - return d.DecodeElement(dmi2, &start) -} - -// EnqueuedMessage - The object returned in the QueueMessageList array when calling Put Message on a Queue -type EnqueuedMessage struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessage"` - // MessageID - The Id of the Message. - MessageID string `xml:"MessageId"` - // InsertionTime - The time the Message was inserted into the Queue. - InsertionTime time.Time `xml:"InsertionTime"` - // ExpirationTime - The time that the Message will expire and be automatically deleted. - ExpirationTime time.Time `xml:"ExpirationTime"` - // PopReceipt - This value is required to delete the Message. If deletion fails using this popreceipt then the message has been dequeued by another client. - PopReceipt string `xml:"PopReceipt"` - // TimeNextVisible - The time that the message will again become visible in the Queue. - TimeNextVisible time.Time `xml:"TimeNextVisible"` -} - -// MarshalXML implements the xml.Marshaler interface for EnqueuedMessage. -func (em EnqueuedMessage) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - em2 := (*enqueuedMessage)(unsafe.Pointer(&em)) - return e.EncodeElement(*em2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for EnqueuedMessage. -func (em *EnqueuedMessage) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - em2 := (*enqueuedMessage)(unsafe.Pointer(em)) - return d.DecodeElement(em2, &start) -} - -// EnqueueResponse - Wraps the response from the messagesClient.Enqueue method. -type EnqueueResponse struct { - rawResponse *http.Response - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessagesList"` - Items []EnqueuedMessage `xml:"QueueMessage"` -} - -// Response returns the raw HTTP response object. -func (er EnqueueResponse) Response() *http.Response { - return er.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (er EnqueueResponse) StatusCode() int { - return er.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (er EnqueueResponse) Status() string { - return er.rawResponse.Status -} - -// Date returns the value for header Date. -func (er EnqueueResponse) Date() time.Time { - s := er.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (er EnqueueResponse) ErrorCode() string { - return er.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (er EnqueueResponse) RequestID() string { - return er.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (er EnqueueResponse) Version() string { - return er.rawResponse.Header.Get("x-ms-version") -} - -// GeoReplication ... -type GeoReplication struct { - // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' - Status GeoReplicationStatusType `xml:"Status"` - // LastSyncTime - A GMT date/time value, to the second. 
All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. - LastSyncTime time.Time `xml:"LastSyncTime"` -} - -// MarshalXML implements the xml.Marshaler interface for GeoReplication. -func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - gr2 := (*geoReplication)(unsafe.Pointer(&gr)) - return e.EncodeElement(*gr2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. -func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - gr2 := (*geoReplication)(unsafe.Pointer(gr)) - return d.DecodeElement(gr2, &start) -} - -// ListQueuesSegmentResponse - The object returned when calling List Queues on a Queue Service. -type ListQueuesSegmentResponse struct { - rawResponse *http.Response - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"EnumerationResults"` - ServiceEndpoint string `xml:"ServiceEndpoint,attr"` - Prefix string `xml:"Prefix"` - Marker *string `xml:"Marker"` - MaxResults int32 `xml:"MaxResults"` - QueueItems []QueueItem `xml:"Queues>Queue"` - NextMarker Marker `xml:"NextMarker"` -} - -// Response returns the raw HTTP response object. -func (lqsr ListQueuesSegmentResponse) Response() *http.Response { - return lqsr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (lqsr ListQueuesSegmentResponse) StatusCode() int { - return lqsr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (lqsr ListQueuesSegmentResponse) Status() string { - return lqsr.rawResponse.Status -} - -// Date returns the value for header Date. -func (lqsr ListQueuesSegmentResponse) Date() time.Time { - s := lqsr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (lqsr ListQueuesSegmentResponse) ErrorCode() string { - return lqsr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (lqsr ListQueuesSegmentResponse) RequestID() string { - return lqsr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (lqsr ListQueuesSegmentResponse) Version() string { - return lqsr.rawResponse.Header.Get("x-ms-version") -} - -// Logging - Azure Analytics Logging settings. -type Logging struct { - // Version - The version of Storage Analytics to configure. - Version string `xml:"Version"` - // Delete - Indicates whether all delete requests should be logged. - Delete bool `xml:"Delete"` - // Read - Indicates whether all read requests should be logged. - Read bool `xml:"Read"` - // Write - Indicates whether all write requests should be logged. - Write bool `xml:"Write"` - RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` -} - -// MessageIDDeleteResponse ... -type MessageIDDeleteResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (midr MessageIDDeleteResponse) Response() *http.Response { - return midr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. 
-func (midr MessageIDDeleteResponse) StatusCode() int { - return midr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (midr MessageIDDeleteResponse) Status() string { - return midr.rawResponse.Status -} - -// Date returns the value for header Date. -func (midr MessageIDDeleteResponse) Date() time.Time { - s := midr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (midr MessageIDDeleteResponse) ErrorCode() string { - return midr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (midr MessageIDDeleteResponse) RequestID() string { - return midr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (midr MessageIDDeleteResponse) Version() string { - return midr.rawResponse.Header.Get("x-ms-version") -} - -// MessageIDUpdateResponse ... -type MessageIDUpdateResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (miur MessageIDUpdateResponse) Response() *http.Response { - return miur.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (miur MessageIDUpdateResponse) StatusCode() int { - return miur.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (miur MessageIDUpdateResponse) Status() string { - return miur.rawResponse.Status -} - -// Date returns the value for header Date. -func (miur MessageIDUpdateResponse) Date() time.Time { - s := miur.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (miur MessageIDUpdateResponse) ErrorCode() string { - return miur.rawResponse.Header.Get("x-ms-error-code") -} - -// PopReceipt returns the value for header x-ms-popreceipt. -func (miur MessageIDUpdateResponse) PopReceipt() string { - return miur.rawResponse.Header.Get("x-ms-popreceipt") -} - -// RequestID returns the value for header x-ms-request-id. -func (miur MessageIDUpdateResponse) RequestID() string { - return miur.rawResponse.Header.Get("x-ms-request-id") -} - -// TimeNextVisible returns the value for header x-ms-time-next-visible. -func (miur MessageIDUpdateResponse) TimeNextVisible() time.Time { - s := miur.rawResponse.Header.Get("x-ms-time-next-visible") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// Version returns the value for header x-ms-version. -func (miur MessageIDUpdateResponse) Version() string { - return miur.rawResponse.Header.Get("x-ms-version") -} - -// MessagesClearResponse ... -type MessagesClearResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (mcr MessagesClearResponse) Response() *http.Response { - return mcr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (mcr MessagesClearResponse) StatusCode() int { - return mcr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". 
-func (mcr MessagesClearResponse) Status() string { - return mcr.rawResponse.Status -} - -// Date returns the value for header Date. -func (mcr MessagesClearResponse) Date() time.Time { - s := mcr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (mcr MessagesClearResponse) ErrorCode() string { - return mcr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (mcr MessagesClearResponse) RequestID() string { - return mcr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (mcr MessagesClearResponse) Version() string { - return mcr.rawResponse.Header.Get("x-ms-version") -} - -// Metrics ... -type Metrics struct { - // Version - The version of Storage Analytics to configure. - Version *string `xml:"Version"` - // Enabled - Indicates whether metrics are enabled for the Queue service. - Enabled bool `xml:"Enabled"` - // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. - IncludeAPIs *bool `xml:"IncludeAPIs"` - RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` -} - -// PeekedMessageItem - The object returned in the QueueMessageList array when calling Peek Messages on a Queue -type PeekedMessageItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessage"` - // MessageID - The Id of the Message. - MessageID string `xml:"MessageId"` - // InsertionTime - The time the Message was inserted into the Queue. - InsertionTime time.Time `xml:"InsertionTime"` - // ExpirationTime - The time that the Message will expire and be automatically deleted. - ExpirationTime time.Time `xml:"ExpirationTime"` - // DequeueCount - The number of times the message has been dequeued. - DequeueCount int64 `xml:"DequeueCount"` - // MessageText - The content of the Message. - MessageText string `xml:"MessageText"` -} - -// MarshalXML implements the xml.Marshaler interface for PeekedMessageItem. -func (pmi PeekedMessageItem) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - pmi2 := (*peekedMessageItem)(unsafe.Pointer(&pmi)) - return e.EncodeElement(*pmi2, start) -} - -// UnmarshalXML implements the xml.Unmarshaler interface for PeekedMessageItem. -func (pmi *PeekedMessageItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - pmi2 := (*peekedMessageItem)(unsafe.Pointer(pmi)) - return d.DecodeElement(pmi2, &start) -} - -// PeekResponse - Wraps the response from the messagesClient.Peek method. -type PeekResponse struct { - rawResponse *http.Response - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessagesList"` - Items []PeekedMessageItem `xml:"QueueMessage"` -} - -// Response returns the raw HTTP response object. -func (pr PeekResponse) Response() *http.Response { - return pr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (pr PeekResponse) StatusCode() int { - return pr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (pr PeekResponse) Status() string { - return pr.rawResponse.Status -} - -// Date returns the value for header Date. 
-func (pr PeekResponse) Date() time.Time { - s := pr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (pr PeekResponse) ErrorCode() string { - return pr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (pr PeekResponse) RequestID() string { - return pr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (pr PeekResponse) Version() string { - return pr.rawResponse.Header.Get("x-ms-version") -} - -// QueueCreateResponse ... -type QueueCreateResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (qcr QueueCreateResponse) Response() *http.Response { - return qcr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qcr QueueCreateResponse) StatusCode() int { - return qcr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qcr QueueCreateResponse) Status() string { - return qcr.rawResponse.Status -} - -// Date returns the value for header Date. -func (qcr QueueCreateResponse) Date() time.Time { - s := qcr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qcr QueueCreateResponse) ErrorCode() string { - return qcr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qcr QueueCreateResponse) RequestID() string { - return qcr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qcr QueueCreateResponse) Version() string { - return qcr.rawResponse.Header.Get("x-ms-version") -} - -// QueueDeleteResponse ... -type QueueDeleteResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (qdr QueueDeleteResponse) Response() *http.Response { - return qdr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qdr QueueDeleteResponse) StatusCode() int { - return qdr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qdr QueueDeleteResponse) Status() string { - return qdr.rawResponse.Status -} - -// Date returns the value for header Date. -func (qdr QueueDeleteResponse) Date() time.Time { - s := qdr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qdr QueueDeleteResponse) ErrorCode() string { - return qdr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qdr QueueDeleteResponse) RequestID() string { - return qdr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qdr QueueDeleteResponse) Version() string { - return qdr.rawResponse.Header.Get("x-ms-version") -} - -// QueueGetPropertiesResponse ... 
-type QueueGetPropertiesResponse struct { - rawResponse *http.Response -} - -// NewMetadata returns user-defined key/value pairs. -func (qgpr QueueGetPropertiesResponse) NewMetadata() Metadata { - md := Metadata{} - for k, v := range qgpr.rawResponse.Header { - if len(k) > mdPrefixLen { - if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { - md[strings.ToLower(k[mdPrefixLen:])] = v[0] - } - } - } - return md -} - -// Response returns the raw HTTP response object. -func (qgpr QueueGetPropertiesResponse) Response() *http.Response { - return qgpr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qgpr QueueGetPropertiesResponse) StatusCode() int { - return qgpr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qgpr QueueGetPropertiesResponse) Status() string { - return qgpr.rawResponse.Status -} - -// ApproximateMessagesCount returns the value for header x-ms-approximate-messages-count. -func (qgpr QueueGetPropertiesResponse) ApproximateMessagesCount() int32 { - s := qgpr.rawResponse.Header.Get("x-ms-approximate-messages-count") - if s == "" { - return -1 - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - i = 0 - } - return int32(i) -} - -// Date returns the value for header Date. -func (qgpr QueueGetPropertiesResponse) Date() time.Time { - s := qgpr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qgpr QueueGetPropertiesResponse) ErrorCode() string { - return qgpr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qgpr QueueGetPropertiesResponse) RequestID() string { - return qgpr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qgpr QueueGetPropertiesResponse) Version() string { - return qgpr.rawResponse.Header.Get("x-ms-version") -} - -// QueueItem - An Azure Storage Queue. -type QueueItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Queue"` - // Name - The name of the Queue. - Name string `xml:"Name"` - Metadata Metadata `xml:"Metadata"` -} - -// QueueMessage - A Message object which can be stored in a Queue -type QueueMessage struct { - // MessageText - The content of the message - MessageText string `xml:"MessageText"` -} - -// QueueMessagesList - Wraps the response from the messagesClient.Dequeue method. -type QueueMessagesList struct { - rawResponse *http.Response - Items []DequeuedMessageItem `xml:"QueueMessage"` -} - -// Response returns the raw HTTP response object. -func (qml QueueMessagesList) Response() *http.Response { - return qml.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qml QueueMessagesList) StatusCode() int { - return qml.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qml QueueMessagesList) Status() string { - return qml.rawResponse.Status -} - -// Date returns the value for header Date. 
-func (qml QueueMessagesList) Date() time.Time { - s := qml.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qml QueueMessagesList) ErrorCode() string { - return qml.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qml QueueMessagesList) RequestID() string { - return qml.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qml QueueMessagesList) Version() string { - return qml.rawResponse.Header.Get("x-ms-version") -} - -// QueueSetAccessPolicyResponse ... -type QueueSetAccessPolicyResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (qsapr QueueSetAccessPolicyResponse) Response() *http.Response { - return qsapr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qsapr QueueSetAccessPolicyResponse) StatusCode() int { - return qsapr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qsapr QueueSetAccessPolicyResponse) Status() string { - return qsapr.rawResponse.Status -} - -// Date returns the value for header Date. -func (qsapr QueueSetAccessPolicyResponse) Date() time.Time { - s := qsapr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qsapr QueueSetAccessPolicyResponse) ErrorCode() string { - return qsapr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qsapr QueueSetAccessPolicyResponse) RequestID() string { - return qsapr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (qsapr QueueSetAccessPolicyResponse) Version() string { - return qsapr.rawResponse.Header.Get("x-ms-version") -} - -// QueueSetMetadataResponse ... -type QueueSetMetadataResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (qsmr QueueSetMetadataResponse) Response() *http.Response { - return qsmr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (qsmr QueueSetMetadataResponse) StatusCode() int { - return qsmr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (qsmr QueueSetMetadataResponse) Status() string { - return qsmr.rawResponse.Status -} - -// Date returns the value for header Date. -func (qsmr QueueSetMetadataResponse) Date() time.Time { - s := qsmr.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (qsmr QueueSetMetadataResponse) ErrorCode() string { - return qsmr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (qsmr QueueSetMetadataResponse) RequestID() string { - return qsmr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
-func (qsmr QueueSetMetadataResponse) Version() string { - return qsmr.rawResponse.Header.Get("x-ms-version") -} - -// RetentionPolicy - the retention policy -type RetentionPolicy struct { - // Enabled - Indicates whether a retention policy is enabled for the storage service - Enabled bool `xml:"Enabled"` - // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted - Days *int32 `xml:"Days"` -} - -// ServiceSetPropertiesResponse ... -type ServiceSetPropertiesResponse struct { - rawResponse *http.Response -} - -// Response returns the raw HTTP response object. -func (sspr ServiceSetPropertiesResponse) Response() *http.Response { - return sspr.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sspr ServiceSetPropertiesResponse) StatusCode() int { - return sspr.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (sspr ServiceSetPropertiesResponse) Status() string { - return sspr.rawResponse.Status -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sspr ServiceSetPropertiesResponse) ErrorCode() string { - return sspr.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (sspr ServiceSetPropertiesResponse) RequestID() string { - return sspr.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (sspr ServiceSetPropertiesResponse) Version() string { - return sspr.rawResponse.Header.Get("x-ms-version") -} - -// SignedIdentifier - signed identifier -type SignedIdentifier struct { - // ID - a unique id - ID string `xml:"Id"` - // AccessPolicy - The access policy - AccessPolicy AccessPolicy `xml:"AccessPolicy"` -} - -// SignedIdentifiers - Wraps the response from the queueClient.GetAccessPolicy method. -type SignedIdentifiers struct { - rawResponse *http.Response - Items []SignedIdentifier `xml:"SignedIdentifier"` -} - -// Response returns the raw HTTP response object. -func (si SignedIdentifiers) Response() *http.Response { - return si.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (si SignedIdentifiers) StatusCode() int { - return si.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (si SignedIdentifiers) Status() string { - return si.rawResponse.Status -} - -// Date returns the value for header Date. -func (si SignedIdentifiers) Date() time.Time { - s := si.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (si SignedIdentifiers) ErrorCode() string { - return si.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (si SignedIdentifiers) RequestID() string { - return si.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (si SignedIdentifiers) Version() string { - return si.rawResponse.Header.Get("x-ms-version") -} - -// StorageServiceProperties - Storage Service Properties. 
-type StorageServiceProperties struct { - rawResponse *http.Response - // Logging - Azure Analytics Logging settings - Logging *Logging `xml:"Logging"` - // HourMetrics - A summary of request statistics grouped by API in hourly aggregates for queues - HourMetrics *Metrics `xml:"HourMetrics"` - // MinuteMetrics - a summary of request statistics grouped by API in minute aggregates for queues - MinuteMetrics *Metrics `xml:"MinuteMetrics"` - // Cors - The set of CORS rules. - Cors []CorsRule `xml:"Cors>CorsRule"` -} - -// Response returns the raw HTTP response object. -func (ssp StorageServiceProperties) Response() *http.Response { - return ssp.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (ssp StorageServiceProperties) StatusCode() int { - return ssp.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (ssp StorageServiceProperties) Status() string { - return ssp.rawResponse.Status -} - -// ErrorCode returns the value for header x-ms-error-code. -func (ssp StorageServiceProperties) ErrorCode() string { - return ssp.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (ssp StorageServiceProperties) RequestID() string { - return ssp.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. -func (ssp StorageServiceProperties) Version() string { - return ssp.rawResponse.Header.Get("x-ms-version") -} - -// StorageServiceStats - Stats for the storage service. -type StorageServiceStats struct { - rawResponse *http.Response - // GeoReplication - Geo-Replication information for the Secondary Storage Service - GeoReplication *GeoReplication `xml:"GeoReplication"` -} - -// Response returns the raw HTTP response object. -func (sss StorageServiceStats) Response() *http.Response { - return sss.rawResponse -} - -// StatusCode returns the HTTP status code of the response, e.g. 200. -func (sss StorageServiceStats) StatusCode() int { - return sss.rawResponse.StatusCode -} - -// Status returns the HTTP status message of the response, e.g. "200 OK". -func (sss StorageServiceStats) Status() string { - return sss.rawResponse.Status -} - -// Date returns the value for header Date. -func (sss StorageServiceStats) Date() time.Time { - s := sss.rawResponse.Header.Get("Date") - if s == "" { - return time.Time{} - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - t = time.Time{} - } - return t -} - -// ErrorCode returns the value for header x-ms-error-code. -func (sss StorageServiceStats) ErrorCode() string { - return sss.rawResponse.Header.Get("x-ms-error-code") -} - -// RequestID returns the value for header x-ms-request-id. -func (sss StorageServiceStats) RequestID() string { - return sss.rawResponse.Header.Get("x-ms-request-id") -} - -// Version returns the value for header x-ms-version. 
-func (sss StorageServiceStats) Version() string { - return sss.rawResponse.Header.Get("x-ms-version") -} - -func init() { - if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) - } - if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between GeoReplication and geoReplication")) - } - if reflect.TypeOf((*DequeuedMessageItem)(nil)).Elem().Size() != reflect.TypeOf((*dequeuedMessageItem)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between DequeuedMessageItem and dequeuedMessageItem")) - } - if reflect.TypeOf((*PeekedMessageItem)(nil)).Elem().Size() != reflect.TypeOf((*peekedMessageItem)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between PeekedMessageItem and peekedMessageItem")) - } - if reflect.TypeOf((*EnqueuedMessage)(nil)).Elem().Size() != reflect.TypeOf((*enqueuedMessage)(nil)).Elem().Size() { - validateError(errors.New("size mismatch between EnqueuedMessage and enqueuedMessage")) - } -} - -const ( - rfc3339Format = "2006-01-02T15:04:05.0000000Z07:00" -) - -// used to convert times from UTC to GMT before sending across the wire -var gmt = time.FixedZone("GMT", 0) - -// internal type used for marshalling time in RFC1123 format -type timeRFC1123 struct { - time.Time -} - -// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. -func (t timeRFC1123) MarshalText() ([]byte, error) { - return []byte(t.Format(time.RFC1123)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. -func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = time.Parse(time.RFC1123, string(data)) - return -} - -// internal type used for marshalling time in RFC3339 format -type timeRFC3339 struct { - time.Time -} - -// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. -func (t timeRFC3339) MarshalText() ([]byte, error) { - return []byte(t.Format(rfc3339Format)), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - t.Time, err = time.Parse(rfc3339Format, string(data)) - return -} - -// internal type used for marshalling -type accessPolicy struct { - Start timeRFC3339 `xml:"Start"` - Expiry timeRFC3339 `xml:"Expiry"` - Permission string `xml:"Permission"` -} - -// internal type used for marshalling -type geoReplication struct { - Status GeoReplicationStatusType `xml:"Status"` - LastSyncTime timeRFC1123 `xml:"LastSyncTime"` -} - -// internal type used for marshalling -type dequeuedMessageItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessage"` - MessageID string `xml:"MessageId"` - InsertionTime timeRFC1123 `xml:"InsertionTime"` - ExpirationTime timeRFC1123 `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible timeRFC1123 `xml:"TimeNextVisible"` - DequeueCount int64 `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// internal type used for marshalling -type peekedMessageItem struct { - // XMLName is used for marshalling and is subject to removal in a future release. 
- XMLName xml.Name `xml:"QueueMessage"` - MessageID string `xml:"MessageId"` - InsertionTime timeRFC1123 `xml:"InsertionTime"` - ExpirationTime timeRFC1123 `xml:"ExpirationTime"` - DequeueCount int64 `xml:"DequeueCount"` - MessageText string `xml:"MessageText"` -} - -// internal type used for marshalling -type enqueuedMessage struct { - // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"QueueMessage"` - MessageID string `xml:"MessageId"` - InsertionTime timeRFC1123 `xml:"InsertionTime"` - ExpirationTime timeRFC1123 `xml:"ExpirationTime"` - PopReceipt string `xml:"PopReceipt"` - TimeNextVisible timeRFC1123 `xml:"TimeNextVisible"` -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_queue.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_queue.go deleted file mode 100644 index 4b4593d07cb..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_queue.go +++ /dev/null @@ -1,393 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// queueClient is the client for the Queue methods of the Azqueue service. -type queueClient struct { - managementClient -} - -// newQueueClient creates an instance of the queueClient client. -func newQueueClient(url url.URL, p pipeline.Pipeline) queueClient { - return queueClient{newManagementClient(url, p)} -} - -// Create creates a new queue under the given account. -// -// timeout is the The timeout parameter is expressed in seconds. For more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetProperties retrieves user-defined metadata and queue properties on the specified queue. Metadata is associated -// with the queue as name-values pairs. -// -// timeout is the The timeout parameter is expressed in seconds. For more information, see 0 { - if err = xml.Unmarshal(b, &responseError); err != nil { - return NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return responseError -} - -// removes any BOM from the byte slice -func removeBOM(b []byte) []byte { - // UTF8 - return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_response_error.go deleted file mode 100644 index 1ee14385c09..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_response_error.go +++ /dev/null @@ -1,95 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -import ( - "bytes" - "fmt" - "github.com/Azure/azure-pipeline-go/pipeline" - "net" - "net/http" -) - -// if you want to provide custom error handling set this variable to your constructor function -var responseErrorFactory func(cause error, response *http.Response, description string) error - -// ResponseError identifies a responder-generated network or response parsing error. 
-type ResponseError interface { - // Error exposes the Error(), Temporary() and Timeout() methods. - net.Error // Includes the Go error interface - // Response returns the HTTP response. You may examine this but you should not modify it. - Response() *http.Response -} - -// NewResponseError creates an error object that implements the error interface. -func NewResponseError(cause error, response *http.Response, description string) error { - if responseErrorFactory != nil { - return responseErrorFactory(cause, response, description) - } - return &responseError{ - ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), - response: response, - description: description, - } -} - -// responseError is the internal struct that implements the public ResponseError interface. -type responseError struct { - pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause - response *http.Response - description string -} - -// Error implements the error interface's Error method to return a string representation of the error. -func (e *responseError) Error() string { - b := &bytes.Buffer{} - fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) - fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) - s := b.String() - return e.ErrorNode.Error(s) -} - -// Response implements the ResponseError interface's method to return the HTTP response. -func (e *responseError) Response() *http.Response { - return e.response -} - -// RFC7807 PROBLEM ------------------------------------------------------------------------------------ -// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. -/*type RFC7807Problem struct { - // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). - typeURI string // Should default to "about:blank" - // Optional: Short, human-readable summary (maybe localized). - title string - // Optional: HTTP status code generated by the origin server - status int - // Optional: Human-readable explanation for this problem occurance. - // Should help client correct the problem. Clients should NOT parse this string. - detail string - // Optional: A (relative) URI identifying this specific problem occurence (it may or may not be dereferenced). - instance string -} -// NewRFC7807Problem ... -func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { - return &RFC7807Problem{ - typeURI: typeURI, - status: status, - title: fmt.Sprintf(titleFormat, a...), - } -} -// Error returns the error information as a string. -func (e *RFC7807Problem) Error() string { - return e.title -} -// TypeURI ... -func (e *RFC7807Problem) TypeURI() string { - if e.typeURI == "" { - e.typeURI = "about:blank" - } - return e.typeURI -} -// Members ... -func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { - return e.status, e.title, e.detail, e.instance -}*/ diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_service.go deleted file mode 100644 index 92d8b848f46..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_service.go +++ /dev/null @@ -1,344 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
- -import ( - "bytes" - "context" - "encoding/xml" - "github.com/Azure/azure-pipeline-go/pipeline" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// serviceClient is the client for the Service methods of the Azqueue service. -type serviceClient struct { - managementClient -} - -// newServiceClient creates an instance of the serviceClient client. -func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { - return serviceClient{newManagementClient(url, p)} -} - -// GetProperties gets the properties of a storage account's Queue service, including properties for Storage Analytics -// and CORS (Cross-Origin Resource Sharing) rules. -// -// timeout is the The timeout parameter is expressed in seconds. For more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// GetStatistics retrieves statistics related to replication for the Queue service. It is only available on the -// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. -// -// timeout is the The timeout parameter is expressed in seconds. For more information, see 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// ListQueuesSegment the List Queues Segment operation returns a list of the queues under the specified account -// -// prefix is filters the results to return only queues whose name begins with the specified prefix. marker is a string -// value that identifies the portion of the list of queues to be returned with the next listing operation. The -// operation returns the NextMarker value within the response body if the listing operation did not return all queues -// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter -// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is -// specifies the maximum number of queues to return. If the request does not specify maxresults, or specifies a value -// greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition -// boundary, then the service will return a continuation token for retrieving the remainder of the results. For this -// reason, it is possible that the service will return fewer results than specified by maxresults, or than the default -// of 5000. include is include this parameter to specify that the queues's metadata be returned as part of the response -// body. timeout is the The timeout parameter is expressed in seconds. 
For more information, see 0 { - params.Set("prefix", *prefix) - } - if marker != nil && len(*marker) > 0 { - params.Set("marker", *marker) - } - if maxresults != nil { - params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) - } - if include != ListQueuesIncludeNone { - params.Set("include", string(include)) - } - if timeout != nil { - params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) - } - params.Set("comp", "list") - req.URL.RawQuery = params.Encode() - req.Header.Set("x-ms-version", ServiceVersion) - if requestID != nil { - req.Header.Set("x-ms-client-request-id", *requestID) - } - return req, nil -} - -// listQueuesSegmentResponder handles the response to the ListQueuesSegment request. -func (client serviceClient) listQueuesSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { - err := validateResponse(resp, http.StatusOK) - if resp == nil { - return nil, err - } - result := &ListQueuesSegmentResponse{rawResponse: resp.Response()} - if err != nil { - return result, err - } - defer resp.Response().Body.Close() - b, err := ioutil.ReadAll(resp.Response().Body) - if err != nil { - return result, err - } - if len(b) > 0 { - b = removeBOM(b) - err = xml.Unmarshal(b, result) - if err != nil { - return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") - } - } - return result, nil -} - -// SetProperties sets properties for a storage account's Queue service endpoint, including properties for Storage -// Analytics and CORS (Cross-Origin Resource Sharing) rules -// -// storageServiceProperties is the StorageService properties. timeout is the The timeout parameter is expressed in -// seconds. For more information, see = int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case inclusiveMinimum: - if i < int64(r) { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case inclusiveMaximum: - if i > int64(r) { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) - } - return nil -} - -func validateFloat(x reflect.Value, v constraint) error { - f := x.Float() - r, ok := v.rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) - } - switch v.name { - case exclusiveMinimum: - if f <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case exclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case inclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case inclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) - } - return nil -} - -func validateString(x reflect.Value, v constraint) error { - s := x.String() - switch v.name { - case empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case pattern: - reg, err := regexp.Compile(v.rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) - } - case maxLength: - if _, ok := v.rule.(int); !ok { - return createError(x, v, 
fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) - } - if len(s) > v.rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) - } - case minLength: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) - } - if len(s) < v.rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) - } - case readOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) - } - if v.chain != nil { - return validate([]validation{ - { - targetValue: getInterfaceValue(x), - constraints: v.chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v constraint) error { - switch v.name { - case null: - if x.IsNil() { - return checkNil(x, v) - } - case empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case maxItems: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) - } - if x.Len() > v.rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) - } - case minItems: - if _, ok := v.rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) - } - if x.Len() < v.rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) - } - case uniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) - } - case readOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case pattern: - reg, err := regexp.Compile(v.rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) - } - if v.chain != nil { - return validate([]validation{ - { - targetValue: getInterfaceValue(x), - constraints: v.chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v constraint) error { - if _, ok := v.rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) - } - if v.rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v constraint) error { - if _, ok := v.rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) - } - if v.rule.(bool) { - return createError(x, v, "value can not be null or empty; 
required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v constraint, message string) error { - return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.target, v.name, getInterfaceValue(x), message)) -} diff --git a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_version.go deleted file mode 100644 index fcfebb10799..00000000000 --- a/vendor/github.com/Azure/azure-storage-queue-go/azqueue/zz_generated_version.go +++ /dev/null @@ -1,14 +0,0 @@ -package azqueue - -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. - -// UserAgent returns the UserAgent string to use when sending http.Requests. -func UserAgent() string { - return "Azure-SDK-For-Go/0.0.0 azqueue/2018-03-28" -} - -// Version returns the semantic version (see http://semver.org) of the client. -func Version() string { - return "0.0.0" -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE deleted file mode 100644 index b9d6a27ea92..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go deleted file mode 100644 index 86694bd2555..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Package to provides helpers to ease working with pointer values of marshalled structures. -*/ -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// String returns a string value for the passed string pointer. It returns the empty string if the -// pointer is nil. -func String(s *string) string { - if s != nil { - return *s - } - return "" -} - -// StringPtr returns a pointer to the passed string. -func StringPtr(s string) *string { - return &s -} - -// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil -// slice if the pointer is nil. -func StringSlice(s *[]string) []string { - if s != nil { - return *s - } - return nil -} - -// StringSlicePtr returns a pointer to the passed string slice. -func StringSlicePtr(s []string) *[]string { - return &s -} - -// StringMap returns a map of strings built from the map of string pointers. The empty string is -// used for nil pointers. -func StringMap(msp map[string]*string) map[string]string { - ms := make(map[string]string, len(msp)) - for k, sp := range msp { - if sp != nil { - ms[k] = *sp - } else { - ms[k] = "" - } - } - return ms -} - -// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. -func StringMapPtr(ms map[string]string) *map[string]*string { - msp := make(map[string]*string, len(ms)) - for k, s := range ms { - msp[k] = StringPtr(s) - } - return &msp -} - -// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. -func Bool(b *bool) bool { - if b != nil { - return *b - } - return false -} - -// BoolPtr returns a pointer to the passed bool. -func BoolPtr(b bool) *bool { - return &b -} - -// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int(i *int) int { - if i != nil { - return *i - } - return 0 -} - -// IntPtr returns a pointer to the passed int. -func IntPtr(i int) *int { - return &i -} - -// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int32(i *int32) int32 { - if i != nil { - return *i - } - return 0 -} - -// Int32Ptr returns a pointer to the passed int32. -func Int32Ptr(i int32) *int32 { - return &i -} - -// Int64 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. -func Int64(i *int64) int64 { - if i != nil { - return *i - } - return 0 -} - -// Int64Ptr returns a pointer to the passed int64. -func Int64Ptr(i int64) *int64 { - return &i -} - -// Float32 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float32(i *float32) float32 { - if i != nil { - return *i - } - return 0.0 -} - -// Float32Ptr returns a pointer to the passed float32. -func Float32Ptr(i float32) *float32 { - return &i -} - -// Float64 returns an int value for the passed int pointer. It returns 0.0 if the pointer is nil. -func Float64(i *float64) float64 { - if i != nil { - return *i - } - return 0.0 -} - -// Float64Ptr returns a pointer to the passed float64. -func Float64Ptr(i float64) *float64 { - return &i -} - -// ByteSlicePtr returns a pointer to the passed byte slice. 
-func ByteSlicePtr(b []byte) *[]byte { - return &b -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go deleted file mode 100644 index b7310f6b868..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/to/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package to - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE deleted file mode 100644 index b9d6a27ea92..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go deleted file mode 100644 index fed156dbf6e..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go +++ /dev/null @@ -1,48 +0,0 @@ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" -) - -// Error is the type that's returned when the validation of an APIs arguments constraints fails. -type Error struct { - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // Message is the error message. - Message string -} - -// Error returns a string containing the details of the validation failure. 
-func (e Error) Error() string { - return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message) -} - -// NewError creates a new Error object with the specified parameters. -// message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) Error { - return Error{ - PackageType: packageType, - Method: method, - Message: fmt.Sprintf(message, args...), - } -} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go deleted file mode 100644 index cf1436291a7..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/go_mod_tidy_hack.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build modhack - -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file, and the github.com/Azure/go-autorest import, won't actually become part of -// the resultant binary. - -// Necessary for safely adding multi-module repo. -// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository -import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go deleted file mode 100644 index ff41cfe0796..00000000000 --- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go +++ /dev/null @@ -1,406 +0,0 @@ -/* -Package validation provides methods for validating parameter value using reflection. -*/ -package validation - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Disabled controls if parameter validation should be globally disabled. The default is false. -var Disabled bool - -// Constraint stores constraint name, target field name -// Rule and chain validations. -type Constraint struct { - - // Target field name for validation. - Target string - - // Constraint name e.g. minLength, MaxLength, Pattern, etc. - Name string - - // Rule for constraint e.g. greater than 10, less than 5 etc. - Rule interface{} - - // Chain Validations for struct type - Chain []Constraint -} - -// Validation stores parameter-wise validation. 
-type Validation struct { - TargetValue interface{} - Constraints []Constraint -} - -// Constraint list -const ( - Empty = "Empty" - Null = "Null" - ReadOnly = "ReadOnly" - Pattern = "Pattern" - MaxLength = "MaxLength" - MinLength = "MinLength" - MaxItems = "MaxItems" - MinItems = "MinItems" - MultipleOf = "MultipleOf" - UniqueItems = "UniqueItems" - InclusiveMaximum = "InclusiveMaximum" - ExclusiveMaximum = "ExclusiveMaximum" - ExclusiveMinimum = "ExclusiveMinimum" - InclusiveMinimum = "InclusiveMinimum" -) - -// Validate method validates constraints on parameter -// passed in validation array. -func Validate(m []Validation) error { - if Disabled { - return nil - } - for _, item := range m { - v := reflect.ValueOf(item.TargetValue) - for _, constraint := range item.Constraints { - var err error - switch v.Kind() { - case reflect.Ptr: - err = validatePtr(v, constraint) - case reflect.String: - err = validateString(v, constraint) - case reflect.Struct: - err = validateStruct(v, constraint) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = validateInt(v, constraint) - case reflect.Float32, reflect.Float64: - err = validateFloat(v, constraint) - case reflect.Array, reflect.Slice, reflect.Map: - err = validateArrayMap(v, constraint) - default: - err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) - } - - if err != nil { - return err - } - } - } - return nil -} - -func validateStruct(x reflect.Value, v Constraint, name ...string) error { - //Get field name from target name which is in format a.b.c - s := strings.Split(v.Target, ".") - f := x.FieldByName(s[len(s)-1]) - if isZero(f) { - return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) - } - - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(f), - Constraints: []Constraint{v}, - }, - }) -} - -func validatePtr(x reflect.Value, v Constraint) error { - if v.Name == ReadOnly { - if !x.IsNil() { - return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") - } - return nil - } - if x.IsNil() { - return checkNil(x, v) - } - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x.Elem()), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateInt(x reflect.Value, v Constraint) error { - i := x.Int() - r, ok := toInt64(v.Rule) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case MultipleOf: - if i%r != 0 { - return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) - } - case ExclusiveMinimum: - if i <= r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if i >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if i < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if i > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) - } - return nil -} - -func validateFloat(x reflect.Value, v Constraint) error { - f := x.Float() - r, ok := v.Rule.(float64) - if !ok { - return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) - } - switch v.Name { - case ExclusiveMinimum: - if f <= 
r { - return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) - } - case ExclusiveMaximum: - if f >= r { - return createError(x, v, fmt.Sprintf("value must be less than %v", r)) - } - case InclusiveMinimum: - if f < r { - return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) - } - case InclusiveMaximum: - if f > r { - return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) - } - return nil -} - -func validateString(x reflect.Value, v Constraint) error { - s := x.String() - switch v.Name { - case Empty: - if len(s) == 0 { - return checkEmpty(x, v) - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return createError(x, v, err.Error()) - } - if !reg.MatchString(s) { - return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) - } - case MaxLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) - } - case MinLength: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) - } - if len(s) < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) - } - case ReadOnly: - if len(s) > 0 { - return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") - } - default: - return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func validateArrayMap(x reflect.Value, v Constraint) error { - switch v.Name { - case Null: - if x.IsNil() { - return checkNil(x, v) - } - case Empty: - if x.IsNil() || x.Len() == 0 { - return checkEmpty(x, v) - } - case MaxItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() > v.Rule.(int) { - return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) - } - case MinItems: - if _, ok := v.Rule.(int); !ok { - return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) - } - if x.Len() < v.Rule.(int) { - return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) - } - case UniqueItems: - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - if !checkForUniqueInArray(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else if x.Kind() == reflect.Map { - if !checkForUniqueInMap(x) { - return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) - } - } else { - return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) - } - case ReadOnly: - if x.Len() != 0 { - return createError(x, v, "readonly parameter; must send as nil or empty in request") - } - case Pattern: - reg, err := regexp.Compile(v.Rule.(string)) - if err != nil { - return 
createError(x, v, err.Error()) - } - keys := x.MapKeys() - for _, k := range keys { - if !reg.MatchString(k.String()) { - return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) - } - } - default: - return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) - } - - if v.Chain != nil { - return Validate([]Validation{ - { - TargetValue: getInterfaceValue(x), - Constraints: v.Chain, - }, - }) - } - return nil -} - -func checkNil(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - if v.Rule.(bool) { - return createError(x, v, "value can not be null; required parameter") - } - return nil -} - -func checkEmpty(x reflect.Value, v Constraint) error { - if _, ok := v.Rule.(bool); !ok { - return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) - } - - if v.Rule.(bool) { - return createError(x, v, "value can not be null or empty; required parameter") - } - return nil -} - -func checkForUniqueInArray(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - arrOfInterface := make([]interface{}, x.Len()) - - for i := 0; i < x.Len(); i++ { - arrOfInterface[i] = x.Index(i).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range arrOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func checkForUniqueInMap(x reflect.Value) bool { - if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { - return false - } - mapOfInterface := make(map[interface{}]interface{}, x.Len()) - - keys := x.MapKeys() - for _, k := range keys { - mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() - } - - m := make(map[interface{}]bool) - for _, val := range mapOfInterface { - if m[val] { - return false - } - m[val] = true - } - return true -} - -func getInterfaceValue(x reflect.Value) interface{} { - if x.Kind() == reflect.Invalid { - return nil - } - return x.Interface() -} - -func isZero(x interface{}) bool { - return x == reflect.Zero(reflect.TypeOf(x)).Interface() -} - -func createError(x reflect.Value, v Constraint, err string) error { - return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", - v.Target, v.Name, getInterfaceValue(x), err) -} - -func toInt64(v interface{}) (int64, bool) { - if i64, ok := v.(int64); ok { - return i64, true - } - // older generators emit max constants as int, so if int64 fails fall back to int - if i32, ok := v.(int); ok { - return int64(i32), true - } - return 0, false -} diff --git a/vendor/github.com/devigned/tab/.gitignore b/vendor/github.com/devigned/tab/.gitignore deleted file mode 100644 index b3efc391885..00000000000 --- a/vendor/github.com/devigned/tab/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -.idea \ No newline at end of file diff --git a/vendor/github.com/devigned/tab/Makefile b/vendor/github.com/devigned/tab/Makefile deleted file mode 100644 index 945767089ca..00000000000 --- a/vendor/github.com/devigned/tab/Makefile +++ /dev/null @@ -1,86 +0,0 @@ -PACKAGE = github.com/devigned/tab -DATE ?= $(shell date +%FT%T%z) -VERSION ?= $(shell git describe --tags 
--always --dirty --match=v* 2> /dev/null || \ - cat $(CURDIR)/.version 2> /dev/null || echo v0) -BIN = $(GOPATH)/bin -BASE = $(CURDIR) -PKGS = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -vE "^$(PACKAGE)/templates/")) -TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS)) -GO_FILES = find . -iname '*.go' -type f - -GO = go -GODOC = godoc -GOFMT = gofmt -GOCYCLO = gocyclo - -V = 0 -Q = $(if $(filter 1,$V),,@) -M = $(shell printf "\033[34;1m▶\033[0m") -TIMEOUT = 1100 - -.PHONY: all -all: fmt lint vet ; $(info $(M) building library…) @ ## Build program - $Q cd $(BASE) && $(GO) build -tags release - -# Tools - -GOLINT = $(BIN)/golint -$(BIN)/golint: ; $(info $(M) building golint…) - $Q go get github.com/golang/lint/golint - -# Tests - -TEST_TARGETS := test-default test-bench test-verbose test-race test-debug test-cover -.PHONY: $(TEST_TARGETS) test-xml check test tests -test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks -test-verbose: ARGS=-v ## Run tests in verbose mode -test-debug: ARGS=-v -debug ## Run tests in verbose mode with debug output -test-race: ARGS=-race ## Run tests with race detector -test-cover: ARGS=-cover -coverprofile=cover.out -v ## Run tests in verbose mode with coverage -$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%) -$(TEST_TARGETS): test -check test tests: cyclo lint vet; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests - $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS) - -.PHONY: vet -vet: $(GOLINT) ; $(info $(M) running vet…) @ ## Run vet - $Q cd $(BASE) && $(GO) vet ./... - -.PHONY: lint -lint: $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint - $Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \ - test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \ - done ; exit $$ret - -.PHONY: fmt -fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files - @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./...); do \ - $(GOFMT) -l -w $$d/*.go || ret=$$? ; \ - done ; exit $$ret - -.PHONY: cyclo -cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files - $Q cd $(BASE) && $(GOCYCLO) -over 19 $$($(GO_FILES)) - -.Phony: destroy-sb -destroy-sb: ; $(info $(M) running sb destroy...) - $(Q) terraform destroy -auto-approve - -# Dependency management -go.sum: go.mod - $Q cd $(BASE) && $(GO) mod tidy - -# Misc - -.PHONY: clean -clean: ; $(info $(M) cleaning…) @ ## Cleanup everything - @rm -rf test/tests.* test/coverage.* - -.PHONY: help -help: - @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ - awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' - -.PHONY: version -version: - @echo $(VERSION) \ No newline at end of file diff --git a/vendor/github.com/devigned/tab/README.md b/vendor/github.com/devigned/tab/README.md deleted file mode 100644 index 98681c9ef1a..00000000000 --- a/vendor/github.com/devigned/tab/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Trace Abstraction (tab) -OpenTracing and OpenCensus abstraction for tracing and logging. - -Why? Well, sometimes you want to let the consumer choose the tracing / logging implementation. - -## Getting Started -### Installing the library - -``` -go get -u github.com/devigned/tab/... 
-``` - -If you need to install Go, follow [the official instructions](https://golang.org/dl/) - -### Usage - -```go -package main - -import ( - "context" - "fmt" - - "github.com/devigned/tab" - _ "github.com/devigned/tab/opencensus" // use OpenCensus - // _ "github.com/devigned/tab/opentracing" // use OpenTracing -) - -func main() { - // start a root span - ctx, span := tab.StartSpan(context.Background(), "main") - defer span.End() // close span when done - - // pass context w/ span to child func - printHelloWorld(ctx) -} - -func printHelloWorld(ctx context.Context) { - // start new span from parent - _, span := tab.StartSpan(ctx, "printHelloWorld") - defer span.End() // close span when done - - // add attribute to span - span.AddAttributes(tab.StringAttribute("interesting", "value")) - fmt.Println("Hello World!") - tab.For(ctx).Info("after println call") -} - -``` \ No newline at end of file diff --git a/vendor/github.com/devigned/tab/trace.go b/vendor/github.com/devigned/tab/trace.go deleted file mode 100644 index 2c72e5f5b6a..00000000000 --- a/vendor/github.com/devigned/tab/trace.go +++ /dev/null @@ -1,200 +0,0 @@ -package tab - -import ( - "context" -) - -var ( - tracer Tracer = new(NoOpTracer) -) - -// Register a Tracer instance -func Register(t Tracer) { - tracer = t -} - -// BoolAttribute returns a bool-valued attribute. -func BoolAttribute(key string, value bool) Attribute { - return Attribute{Key: key, Value: value} -} - -// StringAttribute returns a string-valued attribute. -func StringAttribute(key, value string) Attribute { - return Attribute{Key: key, Value: value} -} - -// Int64Attribute returns an int64-valued attribute. -func Int64Attribute(key string, value int64) Attribute { - return Attribute{Key: key, Value: value} -} - -// StartSpan starts a new child span -func StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner) { - if tracer == nil { - return ctx, new(noOpSpanner) - } - return tracer.StartSpan(ctx, operationName, opts) -} - -// StartSpanWithRemoteParent starts a new child span of the span from the given parent. -func StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner) { - if tracer == nil { - return ctx, new(noOpSpanner) - } - return tracer.StartSpanWithRemoteParent(ctx, operationName, carrier, opts) -} - -// FromContext returns the Span stored in a context, or nil if there isn't one. -func FromContext(ctx context.Context) Spanner { - if tracer == nil { - return new(noOpSpanner) - } - return tracer.FromContext(ctx) -} - -// NewContext returns a new context with the given Span attached. 
-func NewContext(ctx context.Context, span Spanner) context.Context { - if tracer == nil { - return ctx - } - return tracer.NewContext(ctx, span) -} - -type ( - // Attribute is a key value pair for decorating spans - Attribute struct { - Key string - Value interface{} - } - - // Carrier is an abstraction over OpenTracing and OpenCensus propagation carrier - Carrier interface { - Set(key string, value interface{}) - GetKeyValues() map[string]interface{} - } - - // Spanner is an abstraction over OpenTracing and OpenCensus Spans - Spanner interface { - AddAttributes(attributes ...Attribute) - End() - Logger() Logger - Inject(carrier Carrier) error - InternalSpan() interface{} - } - - // Tracer is an abstraction over OpenTracing and OpenCensus trace implementations - Tracer interface { - StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner) - StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner) - FromContext(ctx context.Context) Spanner - NewContext(parent context.Context, span Spanner) context.Context - } - - // Logger is a generic interface for logging - Logger interface { - Info(msg string, attributes ...Attribute) - Error(err error, attributes ...Attribute) - Fatal(msg string, attributes ...Attribute) - Debug(msg string, attributes ...Attribute) - } - - // SpanLogger is a Logger implementation which logs to a tracing span - SpanLogger struct { - Span Spanner - } - - // NoOpTracer is a Tracer implementation that does nothing, thus no op - NoOpTracer struct{} - - noOpLogger struct{} - - noOpSpanner struct{} -) - -// StartSpan returns the input context and a no op Spanner -func (nt *NoOpTracer) StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner) { - return ctx, new(noOpSpanner) -} - -// StartSpanWithRemoteParent returns the input context and a no op Spanner -func (nt *NoOpTracer) StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner) { - return ctx, new(noOpSpanner) -} - -// FromContext returns a no op Spanner without regard to the input context -func (nt *NoOpTracer) FromContext(ctx context.Context) Spanner { - return new(noOpSpanner) -} - -// NewContext returns the parent context -func (nt *NoOpTracer) NewContext(parent context.Context, span Spanner) context.Context { - return parent -} - -// AddAttributes is a nop -func (ns *noOpSpanner) AddAttributes(attributes ...Attribute) {} - -// End is a nop -func (ns *noOpSpanner) End() {} - -// Logger returns a nopLogger -func (ns *noOpSpanner) Logger() Logger { - return new(noOpLogger) -} - -// Inject is a nop -func (ns *noOpSpanner) Inject(carrier Carrier) error { - return nil -} - -// InternalSpan returns nil -func (ns *noOpSpanner) InternalSpan() interface{} { - return nil -} - -// For will return a logger for a given context -func For(ctx context.Context) Logger { - if span := tracer.FromContext(ctx); span != nil { - return span.Logger() - } - return new(noOpLogger) -} - -// Info logs an info tag with message to a span -func (sl SpanLogger) Info(msg string, attributes ...Attribute) { - sl.logToSpan("info", msg, attributes...) -} - -// Error logs an error tag with message to a span -func (sl SpanLogger) Error(err error, attributes ...Attribute) { - attributes = append(attributes, BoolAttribute("error", true)) - sl.logToSpan("error", err.Error(), attributes...) 
-} - -// Fatal logs an error tag with message to a span -func (sl SpanLogger) Fatal(msg string, attributes ...Attribute) { - attributes = append(attributes, BoolAttribute("error", true)) - sl.logToSpan("fatal", msg, attributes...) -} - -// Debug logs a debug tag with message to a span -func (sl SpanLogger) Debug(msg string, attributes ...Attribute) { - sl.logToSpan("debug", msg, attributes...) -} - -func (sl SpanLogger) logToSpan(level string, msg string, attributes ...Attribute) { - attrs := append(attributes, StringAttribute("event", msg), StringAttribute("level", level)) - sl.Span.AddAttributes(attrs...) -} - -// Info nops log entry -func (sl noOpLogger) Info(msg string, attributes ...Attribute) {} - -// Error nops log entry -func (sl noOpLogger) Error(err error, attributes ...Attribute) {} - -// Fatal nops log entry -func (sl noOpLogger) Fatal(msg string, attributes ...Attribute) {} - -// Debug nops log entry -func (sl noOpLogger) Debug(msg string, attributes ...Attribute) {} diff --git a/vendor/github.com/mattn/go-ieproxy/.gitignore b/vendor/github.com/mattn/go-ieproxy/.gitignore deleted file mode 100644 index bc8a670e021..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea/* \ No newline at end of file diff --git a/vendor/github.com/mattn/go-ieproxy/README.md b/vendor/github.com/mattn/go-ieproxy/README.md deleted file mode 100644 index 3e3b4759cf7..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# ieproxy - -Go package to detect the proxy settings on Windows platform, and MacOS. - -On Windows, the settings are initially attempted to be read from the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), but falls back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) in the event the DLL call fails. - -On MacOS, the settings are read from [`CFNetworkCopySystemProxySettings` method of CFNetwork](https://developer.apple.com/documentation/cfnetwork/1426754-cfnetworkcopysystemproxysettings?language=objc). - -For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy) - -## Methods - -You can either obtain a `net/http` compatible proxy function using `ieproxy.GetProxyFunc()`, set environment variables using `ieproxy.OverrideEnvWithStaticProxy()` (though no automatic configuration is available this way), or obtain the proxy settings via `ieproxy.GetConf()`. - -| Method | Supported configuration options: | -|----------------------------------------|-----------------------------------------------| -| `ieproxy.GetProxyFunc()` | Static, Specified script, and fully automatic | -| `ieproxy.OverrideEnvWithStaticProxy()` | Static | -| `ieproxy.GetConf()` | Depends on how you use it | - -## Examples - -### Using GetProxyFunc(): - -```go -func init() { - http.DefaultTransport.(*http.Transport).Proxy = ieproxy.GetProxyFunc() -} -``` - -GetProxyFunc acts as a middleman between `net/http` and `mattn/go-ieproxy` in order to select the correct proxy configuration based off the details supplied in the config. 
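The same proxy function also fits a dedicated client rather than the process-wide default transport. A minimal sketch, not part of the original README, assuming only the `GetProxyFunc()` signature shown above (`func(*http.Request) (*url.URL, error)`); the target URL is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/mattn/go-ieproxy"
)

func main() {
	// Wire the OS-level proxy resolution into one client only,
	// leaving http.DefaultTransport untouched.
	client := &http.Client{
		Transport: &http.Transport{
			Proxy: ieproxy.GetProxyFunc(),
		},
	}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```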
- -### Using OverrideEnvWithStaticProxy(): - -```go -func init() { - ieproxy.OverrideEnvWithStaticProxy() - http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment -} -``` - -OverrideEnvWithStaticProxy overrides the relevant environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`) with the **static, manually configured** proxy details typically found in the registry. - -### Using GetConf(): - -```go -func main() { - conf := ieproxy.GetConf() - //Handle proxies how you want to. -} -``` diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy.go b/vendor/github.com/mattn/go-ieproxy/ieproxy.go deleted file mode 100644 index 0b5460bb828..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy.go +++ /dev/null @@ -1,56 +0,0 @@ -// Package ieproxy is a utility to retrieve the proxy parameters (especially of Internet Explorer on windows) -// -// On windows, it gathers the parameters from the registry (regedit), while it uses env variable on other platforms -package ieproxy - -import "os" - -// ProxyConf gathers the configuration for proxy -type ProxyConf struct { - Static StaticProxyConf // static configuration - Automatic ProxyScriptConf // script configuration -} - -// StaticProxyConf contains the configuration for static proxy -type StaticProxyConf struct { - // Is the proxy active? - Active bool - // Proxy address for each scheme (http, https) - // "" (empty string) is the fallback proxy - Protocols map[string]string - // Addresses not to be browsed via the proxy (comma-separated, linux-like) - NoProxy string -} - -// ProxyScriptConf contains the configuration for automatic proxy -type ProxyScriptConf struct { - // Is the proxy active? - Active bool - // PreConfiguredURL of the .pac file. - // If this is empty and Active is true, auto-configuration should be assumed. - PreConfiguredURL string -} - -// GetConf retrieves the proxy configuration from the Windows Regedit -func GetConf() ProxyConf { - return getConf() -} - -// ReloadConf reloads the proxy configuration -func ReloadConf() ProxyConf { - return reloadConf() -} - -// OverrideEnvWithStaticProxy writes new values to the -// `http_proxy`, `https_proxy` and `no_proxy` environment variables. -// The values are taken from the Windows Regedit (should be called in `init()` function - see example) -func OverrideEnvWithStaticProxy() { - overrideEnvWithStaticProxy(GetConf(), os.Setenv) -} - -// FindProxyForURL computes the proxy for a given URL according to the pac file -func (psc *ProxyScriptConf) FindProxyForURL(URL string) string { - return psc.findProxyForURL(URL) -} - -type envSetter func(string, string) error diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go deleted file mode 100644 index 5d53555708b..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go +++ /dev/null @@ -1,123 +0,0 @@ -package ieproxy - -/* -#cgo LDFLAGS: -framework CoreFoundation -#cgo LDFLAGS: -framework CFNetwork -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "sync" - "unsafe" -) - -var once sync.Once -var darwinProxyConf ProxyConf - -// GetConf retrieves the proxy configuration from the Windows Regedit -func getConf() ProxyConf { - once.Do(writeConf) - return darwinProxyConf -} - -// reloadConf forces a reload of the proxy configuration. 
-func reloadConf() ProxyConf { - writeConf() - return getConf() -} - -func cfStringGetGoString(cfStr C.CFStringRef) string { - retCString := (*C.char)(C.calloc(C.ulong(uint(128)), 1)) - defer C.free(unsafe.Pointer(retCString)) - - C.CFStringGetCString(cfStr, retCString, C.long(128), C.kCFStringEncodingUTF8) - return C.GoString(retCString) -} - -func cfNumberGetGoInt(cfNum C.CFNumberRef) int { - ret := 0 - C.CFNumberGetValue(cfNum, C.kCFNumberIntType, unsafe.Pointer(&ret)) - return ret -} - -func cfArrayGetGoStrings(cfArray C.CFArrayRef) []string { - var ret []string - for i := 0; i < int(C.CFArrayGetCount(cfArray)); i++ { - cfStr := C.CFStringRef(C.CFArrayGetValueAtIndex(cfArray, C.long(i))) - if unsafe.Pointer(cfStr) != C.NULL { - ret = append(ret, cfStringGetGoString(cfStr)) - } - } - return ret -} - -func writeConf() { - cfDictProxy := C.CFDictionaryRef(C.CFNetworkCopySystemProxySettings()) - defer C.CFRelease(C.CFTypeRef(cfDictProxy)) - darwinProxyConf = ProxyConf{} - - cfNumHttpEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPEnable))) - if unsafe.Pointer(cfNumHttpEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpEnable) > 0 { - darwinProxyConf.Static.Active = true - if darwinProxyConf.Static.Protocols == nil { - darwinProxyConf.Static.Protocols = make(map[string]string) - } - httpHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPProxy))) - httpPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPPort))) - - httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpHost), cfNumberGetGoInt(httpPort)) - darwinProxyConf.Static.Protocols["http"] = httpProxy - } - - cfNumHttpsEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSEnable))) - if unsafe.Pointer(cfNumHttpsEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpsEnable) > 0 { - darwinProxyConf.Static.Active = true - if darwinProxyConf.Static.Protocols == nil { - darwinProxyConf.Static.Protocols = make(map[string]string) - } - httpsHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSProxy))) - httpsPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSPort))) - - httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpsHost), cfNumberGetGoInt(httpsPort)) - darwinProxyConf.Static.Protocols["https"] = httpProxy - } - - if darwinProxyConf.Static.Active { - cfArrayExceptionList := C.CFArrayRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesExceptionsList))) - if unsafe.Pointer(cfArrayExceptionList) != C.NULL { - exceptionList := cfArrayGetGoStrings(cfArrayExceptionList) - darwinProxyConf.Static.NoProxy = strings.Join(exceptionList, ",") - } - } - - cfNumPacEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigEnable))) - if unsafe.Pointer(cfNumPacEnable) != C.NULL && cfNumberGetGoInt(cfNumPacEnable) > 0 { - cfStringPac := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigURLString))) - if unsafe.Pointer(cfStringPac) != C.NULL { - pac := cfStringGetGoString(cfStringPac) - darwinProxyConf.Automatic.PreConfiguredURL = pac - darwinProxyConf.Automatic.Active = true - } - } -} - -// OverrideEnvWithStaticProxy writes new values to the -// http_proxy, https_proxy and no_proxy environment variables. -// The values are taken from the MacOS System Preferences. 
-func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { - if conf.Static.Active { - for _, scheme := range []string{"http", "https"} { - url := conf.Static.Protocols[scheme] - if url != "" { - setenv(scheme+"_proxy", url) - } - } - if conf.Static.NoProxy != "" { - setenv("no_proxy", conf.Static.NoProxy) - } - } -} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go deleted file mode 100644 index c352546e23c..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !windows && (!darwin || !cgo) -// +build !windows,!darwin !cgo - -package ieproxy - -func getConf() ProxyConf { - return ProxyConf{} -} - -func reloadConf() ProxyConf { - return getConf() -} - -func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { -} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go deleted file mode 100644 index 7fd375017f6..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go +++ /dev/null @@ -1,219 +0,0 @@ -package ieproxy - -import ( - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/windows/registry" -) - -type regeditValues struct { - ProxyServer string - ProxyOverride string - ProxyEnable uint64 - AutoConfigURL string -} - -var once sync.Once -var windowsProxyConf ProxyConf - -// GetConf retrieves the proxy configuration from the Windows Regedit -func getConf() ProxyConf { - once.Do(writeConf) - return windowsProxyConf -} - -// reloadConf forces a reload of the proxy configuration from the Windows registry -func reloadConf() ProxyConf { - writeConf() - return getConf() -} - -func writeConf() { - proxy := "" - proxyByPass := "" - autoConfigUrl := "" - autoDetect := false - - // Try from IE first. - if ieCfg, err := getUserConfigFromWindowsSyscall(); err == nil { - defer globalFreeWrapper(ieCfg.lpszProxy) - defer globalFreeWrapper(ieCfg.lpszProxyBypass) - defer globalFreeWrapper(ieCfg.lpszAutoConfigUrl) - - proxy = StringFromUTF16Ptr(ieCfg.lpszProxy) - proxyByPass = StringFromUTF16Ptr(ieCfg.lpszProxyBypass) - autoConfigUrl = StringFromUTF16Ptr(ieCfg.lpszAutoConfigUrl) - autoDetect = ieCfg.fAutoDetect - } - - if proxy == "" && !autoDetect { - // Try WinHTTP default proxy. - if defaultCfg, err := getDefaultProxyConfiguration(); err == nil { - defer globalFreeWrapper(defaultCfg.lpszProxy) - defer globalFreeWrapper(defaultCfg.lpszProxyBypass) - - // Always set both of these (they are a pair, it doesn't make sense to set one here and keep the value of the other from above) - proxy = StringFromUTF16Ptr(defaultCfg.lpszProxy) - proxyByPass = StringFromUTF16Ptr(defaultCfg.lpszProxyBypass) - } - } - - if proxy == "" && !autoDetect { - // Fall back to IE registry or manual detection if nothing is found there.. - regedit, _ := readRegedit() // If the syscall fails, backup to manual detection. - windowsProxyConf = parseRegedit(regedit) - return - } - - // Setting the proxy settings. 
- windowsProxyConf = ProxyConf{ - Static: StaticProxyConf{ - Active: len(proxy) > 0, - }, - Automatic: ProxyScriptConf{ - Active: len(autoConfigUrl) > 0 || autoDetect, - }, - } - - if windowsProxyConf.Static.Active { - protocol := make(map[string]string) - for _, s := range strings.Split(proxy, ";") { - s = strings.TrimSpace(s) - if s == "" { - continue - } - pair := strings.SplitN(s, "=", 2) - if len(pair) > 1 { - protocol[pair[0]] = pair[1] - } else { - protocol[""] = pair[0] - } - } - - windowsProxyConf.Static.Protocols = protocol - if len(proxyByPass) > 0 { - windowsProxyConf.Static.NoProxy = strings.Replace(proxyByPass, ";", ",", -1) - } - } - - if windowsProxyConf.Automatic.Active { - windowsProxyConf.Automatic.PreConfiguredURL = autoConfigUrl - } -} - -func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) { - if err := winHttpGetIEProxyConfigForCurrentUser.Find(); err != nil { - return nil, err - } - p := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG) - r, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(p))) - if rTrue(r) { - return p, nil - } - return nil, err -} - -func getDefaultProxyConfiguration() (*tWINHTTP_PROXY_INFO, error) { - pInfo := new(tWINHTTP_PROXY_INFO) - if err := winHttpGetDefaultProxyConfiguration.Find(); err != nil { - return nil, err - } - r, _, err := winHttpGetDefaultProxyConfiguration.Call(uintptr(unsafe.Pointer(pInfo))) - if rTrue(r) { - return pInfo, nil - } - return nil, err -} - -// OverrideEnvWithStaticProxy writes new values to the -// http_proxy, https_proxy and no_proxy environment variables. -// The values are taken from the Windows Regedit (should be called in init() function) -func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { - if conf.Static.Active { - for _, scheme := range []string{"http", "https"} { - url := mapFallback(scheme, "", conf.Static.Protocols) - setenv(scheme+"_proxy", url) - } - if conf.Static.NoProxy != "" { - setenv("no_proxy", conf.Static.NoProxy) - } - } -} - -func parseRegedit(regedit regeditValues) ProxyConf { - protocol := make(map[string]string) - for _, s := range strings.Split(regedit.ProxyServer, ";") { - if s == "" { - continue - } - pair := strings.SplitN(s, "=", 2) - if len(pair) > 1 { - protocol[pair[0]] = pair[1] - } else { - protocol[""] = pair[0] - } - } - - return ProxyConf{ - Static: StaticProxyConf{ - Active: regedit.ProxyEnable > 0, - Protocols: protocol, - NoProxy: strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match linux style - }, - Automatic: ProxyScriptConf{ - Active: regedit.AutoConfigURL != "", - PreConfiguredURL: regedit.AutoConfigURL, - }, - } -} - -func readRegedit() (values regeditValues, err error) { - var proxySettingsPerUser uint64 = 1 // 1 is the default value to consider current user - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) - if err == nil { - //We had used the below variable tempPrxUsrSettings, because the Golang method GetIntegerValue - //sets the value to zero even it fails. 
- tempPrxUsrSettings, _, err := k.GetIntegerValue("ProxySettingsPerUser") - if err == nil { - //consider the value of tempPrxUsrSettings if it is a success - proxySettingsPerUser = tempPrxUsrSettings - } - k.Close() - } - - var hkey registry.Key - if proxySettingsPerUser == 0 { - hkey = registry.LOCAL_MACHINE - } else { - hkey = registry.CURRENT_USER - } - - k, err = registry.OpenKey(hkey, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) - if err != nil { - return - } - defer k.Close() - - values.ProxyServer, _, err = k.GetStringValue("ProxyServer") - if err != nil && err != registry.ErrNotExist { - return - } - values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride") - if err != nil && err != registry.ErrNotExist { - return - } - - values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable") - if err != nil && err != registry.ErrNotExist { - return - } - - values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL") - if err != nil && err != registry.ErrNotExist { - return - } - err = nil - return -} diff --git a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go deleted file mode 100644 index 30ebbd22a07..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package ieproxy - -import ( - "golang.org/x/sys/windows" - "unsafe" -) - -var kernel32 = windows.NewLazySystemDLL("kernel32.dll") -var globalFree = kernel32.NewProc("GlobalFree") - -func globalFreeWrapper(ptr *uint16) { - if ptr != nil { - _, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr))) - } -} - -func rTrue(r uintptr) bool { - return r == 1 -} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go deleted file mode 100644 index a8bf90e94d7..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go +++ /dev/null @@ -1,141 +0,0 @@ -package ieproxy - -/* -#cgo LDFLAGS: -framework CoreFoundation -#cgo LDFLAGS: -framework CFNetwork -#include -#include - -#define STR_LEN 128 - -void proxyAutoConfCallback(void* client, CFArrayRef proxies, CFErrorRef error) { - CFTypeRef* result_ptr = (CFTypeRef*)client; - if (error != NULL) { - *result_ptr = CFRetain(error); - } else { - *result_ptr = CFRetain(proxies); - } - CFRunLoopStop(CFRunLoopGetCurrent()); -} - -int intCFNumber(CFNumberRef num) { - int ret; - CFNumberGetValue(num, kCFNumberIntType, &ret); - return ret; -} - -char* _getProxyUrlFromPac(char* pac, char* reqCs) { - char* retCString = (char*)calloc(STR_LEN, sizeof(char)); - - CFStringRef reqStr = CFStringCreateWithCString(NULL, reqCs, kCFStringEncodingUTF8); - CFStringRef pacStr = CFStringCreateWithCString(NULL, pac, kCFStringEncodingUTF8); - CFURLRef pacUrl = CFURLCreateWithString(NULL, pacStr, NULL); - CFURLRef reqUrl = CFURLCreateWithString(NULL, reqStr, NULL); - - CFTypeRef result = NULL; - CFStreamClientContext context = { 0, &result, NULL, NULL, NULL }; - CFRunLoopSourceRef runloop_src = CFNetworkExecuteProxyAutoConfigurationURL(pacUrl, reqUrl, proxyAutoConfCallback, &context); - - if (runloop_src) { - const CFStringRef private_runloop_mode = CFSTR("go-ieproxy"); - CFRunLoopAddSource(CFRunLoopGetCurrent(), runloop_src, private_runloop_mode); - CFRunLoopRunInMode(private_runloop_mode, DBL_MAX, false); - CFRunLoopRemoveSource(CFRunLoopGetCurrent(), runloop_src, kCFRunLoopCommonModes); - - if (CFGetTypeID(result) == CFArrayGetTypeID()) { - CFArrayRef resultArray = (CFTypeRef)result; - if 
(CFArrayGetCount(resultArray) > 0) { - CFDictionaryRef pxy = (CFDictionaryRef)CFArrayGetValueAtIndex(resultArray, 0); - CFStringRef pxyType = CFDictionaryGetValue(pxy, kCFProxyTypeKey); - - if (CFEqual(pxyType, kCFProxyTypeNone)) { - // noop - } - - if (CFEqual(pxyType, kCFProxyTypeHTTP)) { - CFStringRef host = (CFStringRef)CFDictionaryGetValue(pxy, kCFProxyHostNameKey); - CFNumberRef port = (CFNumberRef)CFDictionaryGetValue(pxy, kCFProxyPortNumberKey); - - char host_str[STR_LEN - 16]; - CFStringGetCString(host, host_str, STR_LEN - 16, kCFStringEncodingUTF8); - - int port_int = 80; - if (port) { - CFNumberGetValue(port, kCFNumberIntType, &port_int); - } - - sprintf(retCString, "%s:%d", host_str, port_int); - } - } - } else { - // error - } - } - - CFRelease(result); - CFRelease(reqStr); - CFRelease(reqUrl); - CFRelease(pacStr); - CFRelease(pacUrl); - return retCString; -} - -char* _getPacUrl() { - char* retCString = (char*)calloc(STR_LEN, sizeof(char)); - CFDictionaryRef proxyDict = CFNetworkCopySystemProxySettings(); - CFNumberRef pacEnable = (CFNumberRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigEnable); - - if (pacEnable && intCFNumber(pacEnable)) { - CFStringRef pacUrlStr = (CFStringRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigURLString); - if (pacUrlStr) { - CFStringGetCString(pacUrlStr, retCString, STR_LEN, kCFStringEncodingUTF8); - } - } - - CFRelease(proxyDict); - return retCString; -} - -*/ -import "C" -import ( - "net/url" - "unsafe" -) - -func (psc *ProxyScriptConf) findProxyForURL(URL string) string { - if !psc.Active { - return "" - } - proxy := getProxyForURL(psc.PreConfiguredURL, URL) - return proxy -} - -func getProxyForURL(pacFileURL, targetURL string) string { - if pacFileURL == "" { - pacFileURL = getPacUrl() - } - if pacFileURL == "" { - return "" - } - if u, err := url.Parse(pacFileURL); err != nil || u.Scheme == "" { - return "" - } - - csUrl := C.CString(targetURL) - csPac := C.CString(pacFileURL) - csRet := C._getProxyUrlFromPac(csPac, csUrl) - - defer C.free(unsafe.Pointer(csUrl)) - defer C.free(unsafe.Pointer(csPac)) - defer C.free(unsafe.Pointer(csRet)) - - return C.GoString(csRet) -} - -func getPacUrl() string { - csRet := C._getPacUrl() - - defer C.free(unsafe.Pointer(csRet)) - return C.GoString(csRet) -} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_unix.go b/vendor/github.com/mattn/go-ieproxy/pac_unix.go deleted file mode 100644 index d4613cff695..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/pac_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows && (!darwin || !cgo) -// +build !windows,!darwin !cgo - -package ieproxy - -func (psc *ProxyScriptConf) findProxyForURL(URL string) string { - return "" -} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_windows.go b/vendor/github.com/mattn/go-ieproxy/pac_windows.go deleted file mode 100644 index 6a2ee677855..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/pac_windows.go +++ /dev/null @@ -1,72 +0,0 @@ -package ieproxy - -import ( - "strings" - "syscall" - "unsafe" -) - -func (psc *ProxyScriptConf) findProxyForURL(URL string) string { - if !psc.Active { - return "" - } - proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL) - i := strings.Index(proxy, ";") - if i >= 0 { - return proxy[:i] - } - return proxy -} - -func getProxyForURL(pacfileURL, URL string) (string, error) { - pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL) - if err != nil { - return "", err - } - URLPtr, err := syscall.UTF16PtrFromString(URL) - if err != nil { - 
return "", err - } - - handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) - if handle == 0 { - return "", err - } - defer winHttpCloseHandle.Call(handle) - - dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL - dwAutoDetectFlags := autoDetectFlag(0) - pfURLptr := pacfileURLPtr - - if pacfileURL == "" { - dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT - dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP - pfURLptr = nil - } - - options := tWINHTTP_AUTOPROXY_OPTIONS{ - dwFlags: dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6 - dwAutoDetectFlags: dwAutoDetectFlags, - lpszAutoConfigUrl: pfURLptr, - lpvReserved: nil, - dwReserved: 0, - fAutoLogonIfChallenged: true, // may not be optimal https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx - } // lpszProxyBypass isn't used as this only executes in cases where there (may) be a pac file (autodetect can fail), where lpszProxyBypass couldn't be returned. - // in the case that autodetect fails and no pre-specified pacfile is present, no proxy is returned. - - info := new(tWINHTTP_PROXY_INFO) - - ret, _, err := winHttpGetProxyForURL.Call( - handle, - uintptr(unsafe.Pointer(URLPtr)), - uintptr(unsafe.Pointer(&options)), - uintptr(unsafe.Pointer(info)), - ) - if ret > 0 { - err = nil - } - - defer globalFreeWrapper(info.lpszProxyBypass) - defer globalFreeWrapper(info.lpszProxy) - return StringFromUTF16Ptr(info.lpszProxy), err -} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go deleted file mode 100644 index b2ff9147b92..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go +++ /dev/null @@ -1,11 +0,0 @@ -package ieproxy - -import ( - "net/http" - "net/url" -) - -// GetProxyFunc is a forwarder for the OS-Exclusive proxyMiddleman_os.go files -func GetProxyFunc() func(*http.Request) (*url.URL, error) { - return proxyMiddleman() -} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go deleted file mode 100644 index a89948dca65..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go +++ /dev/null @@ -1,43 +0,0 @@ -package ieproxy - -import ( - "net/http" - "net/url" - - "golang.org/x/net/http/httpproxy" -) - -func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { - // Get the proxy configuration - conf := GetConf() - envCfg := httpproxy.FromEnvironment() - - if envCfg.HTTPProxy != "" || envCfg.HTTPSProxy != "" { - // If the user manually specifies environment variables, prefer those over the MacOS config. - return http.ProxyFromEnvironment - } - - return func(req *http.Request) (i *url.URL, e error) { - if conf.Automatic.Active { - host := conf.Automatic.FindProxyForURL(req.URL.String()) - if host != "" { - return &url.URL{Host: host}, nil - } - } - if conf.Static.Active { - return staticProxy(conf, req) - } - // Should return no proxy; fallthrough. 
- return http.ProxyFromEnvironment(req) - } -} - -func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { - // If static proxy obtaining is specified - proxy := httpproxy.Config{ - HTTPSProxy: conf.Static.Protocols["https"], - HTTPProxy: conf.Static.Protocols["http"], - NoProxy: conf.Static.NoProxy, - } - return proxy.ProxyFunc()(req.URL) -} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go deleted file mode 100644 index fe227a12ee2..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - -package ieproxy - -import ( - "net/http" - "net/url" -) - -func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { - // Fallthrough to ProxyFromEnvironment on all other OSes. - return http.ProxyFromEnvironment -} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go deleted file mode 100644 index 7d314dbf9ca..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package ieproxy - -import ( - "net/http" - "net/url" - - "golang.org/x/net/http/httpproxy" -) - -func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { - // Get the proxy configuration - conf := GetConf() - envcfg := httpproxy.FromEnvironment() - - if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" { - // If the user manually specifies environment variables, prefer those over the Windows config. - return http.ProxyFromEnvironment - } - - return func(req *http.Request) (i *url.URL, e error) { - if conf.Automatic.Active { - host := conf.Automatic.FindProxyForURL(req.URL.String()) - if host != "" { - return &url.URL{Host: host}, nil - } - } - if conf.Static.Active { - return staticProxy(conf, req) - } - // Should return no proxy; fallthrough. - return http.ProxyFromEnvironment(req) - } -} - -func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { - // If static proxy obtaining is specified - prox := httpproxy.Config{ - HTTPSProxy: mapFallback("https", "", conf.Static.Protocols), - HTTPProxy: mapFallback("http", "", conf.Static.Protocols), - NoProxy: conf.Static.NoProxy, - } - return prox.ProxyFunc()(req.URL) -} - -// Return oKey or fbKey if oKey doesn't exist in the map. 
-func mapFallback(oKey, fbKey string, m map[string]string) string { - if v, ok := m[oKey]; ok { - return v - } else { - return m[fbKey] - } -} diff --git a/vendor/github.com/mattn/go-ieproxy/utils.go b/vendor/github.com/mattn/go-ieproxy/utils.go deleted file mode 100644 index 353b231120a..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/utils.go +++ /dev/null @@ -1,23 +0,0 @@ -package ieproxy - -import ( - "unicode/utf16" - "unsafe" -) - -// StringFromUTF16Ptr converts a *uint16 C string to a Go String -func StringFromUTF16Ptr(s *uint16) string { - if s == nil { - return "" - } - - p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s)) - - // find the string length - sz := 0 - for p[sz] != 0 { - sz++ - } - - return string(utf16.Decode(p[:sz:sz])) -} diff --git a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go deleted file mode 100644 index 4d3b1677805..00000000000 --- a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package ieproxy - -import "golang.org/x/sys/windows" - -var winHttp = windows.NewLazySystemDLL("winhttp.dll") -var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl") -var winHttpOpen = winHttp.NewProc("WinHttpOpen") -var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle") -var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser") -var winHttpGetDefaultProxyConfiguration = winHttp.NewProc("WinHttpGetDefaultProxyConfiguration") - -type tWINHTTP_AUTOPROXY_OPTIONS struct { - dwFlags autoProxyFlag - dwAutoDetectFlags autoDetectFlag - lpszAutoConfigUrl *uint16 - lpvReserved *uint16 - dwReserved uint32 - fAutoLogonIfChallenged bool -} -type autoProxyFlag uint32 - -const ( - fWINHTTP_AUTOPROXY_AUTO_DETECT = autoProxyFlag(0x00000001) - fWINHTTP_AUTOPROXY_CONFIG_URL = autoProxyFlag(0x00000002) - fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT = autoProxyFlag(0x00080000) - fWINHTTP_AUTOPROXY_NO_CACHE_SVC = autoProxyFlag(0x00100000) - fWINHTTP_AUTOPROXY_NO_DIRECTACCESS = autoProxyFlag(0x00040000) - fWINHTTP_AUTOPROXY_RUN_INPROCESS = autoProxyFlag(0x00010000) - fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000) - fWINHTTP_AUTOPROXY_SORT_RESULTS = autoProxyFlag(0x00400000) -) - -type autoDetectFlag uint32 - -const ( - fWINHTTP_AUTO_DETECT_TYPE_DHCP = autoDetectFlag(0x00000001) - fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002) -) - -type tWINHTTP_PROXY_INFO struct { - dwAccessType uint32 - lpszProxy *uint16 - lpszProxyBypass *uint16 -} - -type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct { - fAutoDetect bool - lpszAutoConfigUrl *uint16 - lpszProxy *uint16 - lpszProxyBypass *uint16 -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4fe4783cc6d..365f558be99 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,21 +58,7 @@ filippo.io/edwards25519 filippo.io/edwards25519/field # github.com/Azure/azure-amqp-common-go/v4 v4.2.0 ## explicit; go 1.18 -github.com/Azure/azure-amqp-common-go/v4 -github.com/Azure/azure-amqp-common-go/v4/aad github.com/Azure/azure-amqp-common-go/v4/auth -github.com/Azure/azure-amqp-common-go/v4/cbs -github.com/Azure/azure-amqp-common-go/v4/conn -github.com/Azure/azure-amqp-common-go/v4/internal -github.com/Azure/azure-amqp-common-go/v4/internal/tracing -github.com/Azure/azure-amqp-common-go/v4/rpc -github.com/Azure/azure-amqp-common-go/v4/sas -github.com/Azure/azure-amqp-common-go/v4/uuid -# github.com/Azure/azure-event-hubs-go/v3 v3.6.2 -## explicit; go 1.18 
-github.com/Azure/azure-event-hubs-go/v3 -github.com/Azure/azure-event-hubs-go/v3/atom -github.com/Azure/azure-event-hubs-go/v3/persist # github.com/Azure/azure-kusto-go v0.15.2 ## explicit; go 1.19 github.com/Azure/azure-kusto-go/kusto @@ -92,18 +78,10 @@ github.com/Azure/azure-kusto-go/kusto/kql github.com/Azure/azure-kusto-go/kusto/trustedendpoints github.com/Azure/azure-kusto-go/kusto/unsafe github.com/Azure/azure-kusto-go/kusto/utils -# github.com/Azure/azure-pipeline-go v0.2.3 -## explicit; go 1.14 -github.com/Azure/azure-pipeline-go/pipeline -# github.com/Azure/azure-sdk-for-go v68.0.0+incompatible -## explicit -github.com/Azure/azure-sdk-for-go/services/eventhub/mgmt/2017-04-01/eventhub -github.com/Azure/azure-sdk-for-go/services/keyvault/v7.0/keyvault -github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights -github.com/Azure/azure-sdk-for-go/version # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime @@ -128,7 +106,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -138,11 +116,22 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/telemetry github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher -# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin @@ -155,12 +144,42 @@ github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sas github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sbauth 
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils -# github.com/Azure/azure-storage-blob-go v0.15.0 -## explicit; go 1.15 -github.com/Azure/azure-storage-blob-go/azblob -# github.com/Azure/azure-storage-queue-go v0.0.0-20230927153703-648530c9aaf2 -## explicit -github.com/Azure/azure-storage-queue-go/azqueue +# github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery +# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub +# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets +# github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service +# github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/base +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/generated +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/queueerror +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue/sas # github.com/Azure/go-amqp v1.0.5 ## explicit; go 1.18 github.com/Azure/go-amqp @@ -190,12 +209,6 @@ github.com/Azure/go-autorest/autorest/azure/cli # github.com/Azure/go-autorest/autorest/date v0.3.0 ## explicit; go 1.12 github.com/Azure/go-autorest/autorest/date -# github.com/Azure/go-autorest/autorest/to v0.4.0 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/to -# github.com/Azure/go-autorest/autorest/validation v0.3.1 -## explicit; go 1.12 -github.com/Azure/go-autorest/autorest/validation # github.com/Azure/go-autorest/logger v0.2.1 ## explicit; go 1.12 github.com/Azure/go-autorest/logger @@ -506,9 +519,6 @@ github.com/denisenkom/go-mssqldb/internal/cp github.com/denisenkom/go-mssqldb/internal/decimal github.com/denisenkom/go-mssqldb/internal/querytext github.com/denisenkom/go-mssqldb/msdsn -# github.com/devigned/tab v0.1.1 -## explicit; go 1.12 -github.com/devigned/tab # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous @@ -1021,9 +1031,6 @@ github.com/mailru/easyjson/jwriter # 
github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-ieproxy v0.0.11 -## explicit; go 1.17 -github.com/mattn/go-ieproxy # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty From b0d891a96e71114397864d20113374c66c1eab62 Mon Sep 17 00:00:00 2001 From: rickbrouwer <75609067+rickbrouwer@users.noreply.github.com> Date: Thu, 25 Jul 2024 12:21:22 +0200 Subject: [PATCH 14/37] feat: add TLS support for IBM MQ scaler (#5976) * feat: add TLS auth support for IBM MQ scaler Signed-off-by: rickbrouwer <75609067+rickbrouwer@users.noreply.github.com> * fix: fix ibmmq scaler test Signed-off-by: Rick Brouwer * fix: correct httpClient Co-authored-by: Jorge Turrado Ferrero Signed-off-by: rickbrouwer <75609067+rickbrouwer@users.noreply.github.com> --------- Signed-off-by: rickbrouwer <75609067+rickbrouwer@users.noreply.github.com> Signed-off-by: Rick Brouwer Co-authored-by: Jorge Turrado Ferrero Signed-off-by: novoselov --- CHANGELOG.md | 1 + pkg/scalers/ibmmq_scaler.go | 70 ++++++++++++++++++++++++-------- pkg/scalers/ibmmq_scaler_test.go | 7 +++- 3 files changed, 60 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 619958b3204..245c90c27f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ Here is an overview of all new **experimental** features: - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802)) - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) +- **IBM MQ Scaler**: Add TLS support for IBM MQ scaler ([#5974](https://github.com/kedacore/keda/issues/5974)) - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689)) - **MYSQL Scaler**: Add support to fetch username from env ([#5883](https://github.com/kedacore/keda/issues/5883)) diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go index 052a6c08cbc..efd83d8c968 100644 --- a/pkg/scalers/ibmmq_scaler.go +++ b/pkg/scalers/ibmmq_scaler.go @@ -31,6 +31,7 @@ type IBMMQScaler struct { metricType v2.MetricTargetType metadata *IBMMQMetadata defaultHTTPTimeout time.Duration + httpClient *http.Client logger logr.Logger } @@ -45,6 +46,13 @@ type IBMMQMetadata struct { activationQueueDepth int64 tlsDisabled bool triggerIndex int + + // TLS + ca string + cert string + key string + keyPassword string + unsafeSsl bool } // CommandResponse Full structured response from MQ admin REST query @@ -75,16 +83,31 @@ func NewIBMMQScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error parsing IBM MQ metadata: %w", err) } + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.tlsDisabled) + + // Configure TLS if cert and key are specified + if meta.cert != "" && meta.key != "" { + tlsConfig, err := kedautil.NewTLSConfigWithPassword(meta.cert, meta.key, meta.keyPassword, meta.ca, meta.unsafeSsl) + if err != nil { + return nil, err + } + httpClient.Transport = kedautil.CreateHTTPTransportWithTLSConfig(tlsConfig) + } + return &IBMMQScaler{ metricType: metricType, metadata: meta, defaultHTTPTimeout: config.GlobalHTTPTimeout, + httpClient: httpClient, logger: InitializeLogger(config, "ibm_mq_scaler"), }, nil } // Close closes and returns nil func (s 
*IBMMQScaler) Close(context.Context) error { + if s.httpClient != nil { + s.httpClient.CloseIdleConnections() + } return nil } @@ -144,24 +167,38 @@ func parseIBMMQMetadata(config *scalersconfig.ScalerConfig) (*IBMMQMetadata, err fmt.Println("No tls setting defined - setting default") meta.tlsDisabled = defaultTLSDisabled } - val, ok := config.AuthParams["username"] - switch { - case ok && val != "": + + if val, ok := config.AuthParams["username"]; ok && val != "" { meta.username = val - case config.TriggerMetadata["usernameFromEnv"] != "": - meta.username = config.ResolvedEnv[config.TriggerMetadata["usernameFromEnv"]] - default: + } else if val, ok := config.TriggerMetadata["usernameFromEnv"]; ok && val != "" { + meta.username = config.ResolvedEnv[val] + } else { return nil, fmt.Errorf("no username given") } - pwdValue, booleanValue := config.AuthParams["password"] // booleanValue reports whether the type assertion succeeded or not - switch { - case booleanValue && pwdValue != "": - meta.password = pwdValue - case config.TriggerMetadata["passwordFromEnv"] != "": - meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - default: + + if val, ok := config.AuthParams["password"]; ok && val != "" { + meta.password = val + } else if val, ok := config.TriggerMetadata["passwordFromEnv"]; ok && val != "" { + meta.password = config.ResolvedEnv[val] + } else { return nil, fmt.Errorf("no password given") } + + // TLS config (optional) + meta.ca = config.AuthParams["ca"] + meta.cert = config.AuthParams["cert"] + meta.key = config.AuthParams["key"] + meta.keyPassword = config.AuthParams["keyPassword"] + + meta.unsafeSsl = false + if val, ok := config.TriggerMetadata["unsafeSsl"]; ok { + boolVal, err := strconv.ParseBool(val) + if err != nil { + return nil, fmt.Errorf("failed to parse unsafeSsl value. 
Must be either true or false") + } + meta.unsafeSsl = boolVal + } + meta.triggerIndex = config.TriggerIndex return &meta, nil } @@ -178,11 +215,10 @@ func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { } req.Header.Set("ibm-mq-rest-csrf-token", "value") req.Header.Set("Content-Type", "application/json") - req.SetBasicAuth(s.metadata.username, s.metadata.password) - client := kedautil.CreateHTTPClient(s.defaultHTTPTimeout, s.metadata.tlsDisabled) + req.SetBasicAuth(s.metadata.username, s.metadata.password) - resp, err := client.Do(req) + resp, err := s.httpClient.Do(req) if err != nil { return 0, fmt.Errorf("failed to contact MQ via REST: %w", err) } @@ -190,7 +226,7 @@ func (s *IBMMQScaler) getQueueDepthViaHTTP(ctx context.Context) (int64, error) { body, err := io.ReadAll(resp.Body) if err != nil { - return 0, fmt.Errorf("failed to ready body of request: %w", err) + return 0, fmt.Errorf("failed to read body of request: %w", err) } var response CommandResponse diff --git a/pkg/scalers/ibmmq_scaler_test.go b/pkg/scalers/ibmmq_scaler_test.go index 5eade446363..c3e2430043f 100644 --- a/pkg/scalers/ibmmq_scaler_test.go +++ b/pkg/scalers/ibmmq_scaler_test.go @@ -63,12 +63,16 @@ var testIBMMQMetadata = []parseIBMMQMetadataTestData{ {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, // Invalid URL {map[string]string{"host": testInvalidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, - // Properly formed authParams + // Properly formed authParams Basic Auth {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123"}}, + // Properly formed authParams Basic Auth and TLS + {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, false, map[string]string{"username": "testUsername", "password": "Pass123", "ca": "cavalue", "cert": "certvalue", "key": "keyvalue"}}, // No username provided {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"password": "Pass123"}}, // No password provided {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10"}, true, map[string]string{"username": "testUsername"}}, + // Wrong input unsafeSsl + {map[string]string{"host": testValidMQQueueURL, "queueManager": "testQueueManager", "queueName": "testQueue", "queueDepth": "10", "unsafeSsl": "random"}, true, map[string]string{"username": "testUsername", "password": "Pass123"}}, } // Test MQ Connection metadata is parsed correctly @@ -216,6 +220,7 @@ func TestIBMMQScalerGetQueueDepthViaHTTP(t *testing.T) { metadata: &IBMMQMetadata{ host: server.URL, }, + httpClient: server.Client(), } value, err := scaler.getQueueDepthViaHTTP(context.Background()) From 0051955de1a6f0d57d82287aac9b799402b66840 Mon Sep 17 00:00:00 2001 From: Ben Woodley Date: Thu, 25 Jul 2024 12:22:49 +0200 Subject: [PATCH 15/37] feat: GCP Pub/Sub scaler add configurable fallback value when no metric value found (#5897) Signed-off-by: Benjamin Woodley Signed-off-by: novoselov --- CHANGELOG.md | 1 + 
pkg/scalers/gcp/gcp_stackdriver_client.go | 7 +++++-- pkg/scalers/gcp_pubsub_scaler.go | 11 ++++++++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 245c90c27f5..8c03db799aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ Here is an overview of all new **experimental** features: ### Improvements - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802)) +- **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when GCP metric returns no value. ([#5896](https://github.com/kedacore/keda/issues/5896)) - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) - **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) - **IBM MQ Scaler**: Add TLS support for IBM MQ scaler ([#5974](https://github.com/kedacore/keda/issues/5974)) diff --git a/pkg/scalers/gcp/gcp_stackdriver_client.go b/pkg/scalers/gcp/gcp_stackdriver_client.go index 7384f035b59..87893a3979e 100644 --- a/pkg/scalers/gcp/gcp_stackdriver_client.go +++ b/pkg/scalers/gcp/gcp_stackdriver_client.go @@ -288,7 +288,7 @@ func (s StackDriverClient) GetMetrics( // // MQL provides a more expressive query language than // the current filtering options of GetMetrics -func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query string) (float64, error) { +func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query string, valueIfNull *float64) (float64, error) { req := &monitoringpb.QueryTimeSeriesRequest{ Query: query, PageSize: 1, @@ -303,7 +303,10 @@ func (s StackDriverClient) QueryMetrics(ctx context.Context, projectID, query st resp, err := it.Next() if err == iterator.Done { - return value, fmt.Errorf("could not find stackdriver metric with query %s", req.Query) + if valueIfNull == nil { + return value, fmt.Errorf("could not find stackdriver metric with query %s", req.Query) + } + return *valueIfNull, nil } if err != nil { diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go index dd9b9873ebc..1ba909e7e2f 100644 --- a/pkg/scalers/gcp_pubsub_scaler.go +++ b/pkg/scalers/gcp_pubsub_scaler.go @@ -49,6 +49,7 @@ type pubsubMetadata struct { triggerIndex int aggregation string timeHorizon string + valueIfNull *float64 } // NewPubSubScaler creates a new pubsubScaler @@ -179,6 +180,14 @@ func parsePubSubMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) } } + if val, ok := config.TriggerMetadata["valueIfNull"]; ok && val != "" { + valueIfNull, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("valueIfNull parsing error %w", err) + } + meta.valueIfNull = &valueIfNull + } + meta.aggregation = config.TriggerMetadata["aggregation"] meta.timeHorizon = config.TriggerMetadata["timeHorizon"] @@ -291,7 +300,7 @@ func (s *pubsubScaler) getMetrics(ctx context.Context, metricType string) (float // Pubsub metrics are collected every 60 seconds so no need to aggregate them. 
// See: https://cloud.google.com/monitoring/api/metrics_gcp#gcp-pubsub
-	return s.client.QueryMetrics(ctx, projectID, query)
+	return s.client.QueryMetrics(ctx, projectID, query, s.metadata.valueIfNull)
 }
 
 func getResourceData(s *pubsubScaler) (string, string) {

From f338ae3322cfb839cdbec84f97d74636a1f2860a Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Thu, 25 Jul 2024 13:48:54 +0200
Subject: [PATCH 16/37] fix: Exclude test files from semgrep scan (#5982)

Signed-off-by: Jorge Turrado
Signed-off-by: novoselov
---
 .github/workflows/static-analysis-semgrep.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/static-analysis-semgrep.yml b/.github/workflows/static-analysis-semgrep.yml
index 91eea586c09..f7695c00d3b 100644
--- a/.github/workflows/static-analysis-semgrep.yml
+++ b/.github/workflows/static-analysis-semgrep.yml
@@ -2,7 +2,7 @@ name: "Semgrep"
 
 on:
   push:
-    branches: [ "main" ]
+    branches: ["main"]
   pull_request_target: {}
 
 concurrency:
@@ -12,7 +12,7 @@ jobs:
   semgrep:
     name: Analyze Semgrep
-    runs-on: ubuntu-latest
+    runs-on: ARM64
     container: returntocorp/semgrep
     if: (github.actor != 'dependabot[bot]')
     steps:
@@ -30,7 +30,7 @@ jobs:
           apk add github-cli
           gh pr checkout ${{ github.event.number }}
 
-      - run: semgrep ci --sarif --output=semgrep.sarif
+      - run: semgrep ci --exclude=test --sarif --output=semgrep.sarif
         env:
           SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }}

From f647037f6c8b87f7e971c8bd95c9feb33ca8b39e Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Thu, 25 Jul 2024 13:59:49 +0200
Subject: [PATCH 17/37] fix: use AMD machines again for semgrep workflow (#5983)

Signed-off-by: Jorge Turrado
Signed-off-by: novoselov
---
 .github/workflows/static-analysis-semgrep.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/static-analysis-semgrep.yml b/.github/workflows/static-analysis-semgrep.yml
index f7695c00d3b..1b68a04d000 100644
--- a/.github/workflows/static-analysis-semgrep.yml
+++ b/.github/workflows/static-analysis-semgrep.yml
@@ -12,7 +12,7 @@ jobs:
   semgrep:
     name: Analyze Semgrep
-    runs-on: ARM64
+    runs-on: ubuntu-latest
     container: returntocorp/semgrep
     if: (github.actor != 'dependabot[bot]')
     steps:

From 01715b92793ac72e684d2a7819de6a1780bed9a2 Mon Sep 17 00:00:00 2001
From: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>
Date: Mon, 29 Jul 2024 11:07:16 +0200
Subject: [PATCH 18/37] add core logic to support access token in postgres scaler (#5589)

* add core logic to support access token in postgres scaler

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* minor fix

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* run make build to fmt code

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* make regexp password pattern global

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* adapt to use placeholder for regexp

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* add missing authPodIdentity variable

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* lint code using gci write... 
command

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* lint import + add 2 unit tests

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* lint with make fmt

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* remove podIdentityAzure references (but keep AzureWorkload ones)

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* replace switch by if statements + fix error when comparing + close connection before recreating it

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* generate a new token if the current one has expired + add log info statement

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* minor change + add entry in CHANGELOG.md

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* Add first draft of an e2e test

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* Add comment and change package name

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* fix golangci lint

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* use identity 1 in e2e tests

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* fix e2e tests after testing it + change .env file

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* go fmt

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* remove entries in .env file

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* Add Postgres env variables

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* remove useless variables

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>

* Update e2e test to reset all the tasks using a query

Signed-off-by: Jorge Turrado

* missing changes after rebase

Signed-off-by: Jorge Turrado

* fix typo in the query

Signed-off-by: Jorge Turrado Ferrero

* remove the load

Signed-off-by: Jorge Turrado

* fix style

Signed-off-by: Jorge Turrado

---------

Signed-off-by: Ferdinand de Baecque <45566171+Ferdinanddb@users.noreply.github.com>
Signed-off-by: Jorge Turrado
Signed-off-by: Jorge Turrado Ferrero
Signed-off-by: Jorge Turrado
Co-authored-by: Jorge Turrado
Co-authored-by: Jorge Turrado Ferrero
Signed-off-by: novoselov
---
 CHANGELOG.md | 1 +
 go.sum | 13 +-
 pkg/scalers/postgresql_scaler.go | 210 ++++++++++-----
 pkg/scalers/postgresql_scaler_test.go | 28 +-
 pkg/scaling/scalers_builder.go | 2 +-
 ...zure_postgresql_flex_server_aad_wi_test.go | 244 ++++++++++++++++++
 6 files changed, 427 insertions(+), 71 deletions(-)
 create mode 100644 tests/scalers/postgresql/azure_postgresql_flex_server_aad_wi/azure_postgresql_flex_server_aad_wi_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c03db799aa..0c83a8e186a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -81,6 +81,7 @@ Here is an overview of all new **experimental** features:
 - **IBM MQ Scaler**: Add TLS support for IBM MQ scaler ([#5974](https://github.com/kedacore/keda/issues/5974))
 - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689))
 - **MYSQL Scaler**: Add support to fetch username from env ([#5883](https://github.com/kedacore/keda/issues/5883))
+- **Postgres 
Scaler**: Add support for access token authentication to an Azure Postgres Flexible Server ([#5823](https://github.com/kedacore/keda/issues/5823)) ### Fixes diff --git a/go.sum b/go.sum index c87806ce063..5b72c848fcc 100644 --- a/go.sum +++ b/go.sum @@ -1930,16 +1930,15 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M= -github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -2023,6 +2022,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jstemmer/go-junit-report/v2 v2.1.0 h1:X3+hPYlSczH9IMIpSC9CQSZA0L+BipYafciZUWHEmsc= +github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -2074,7 +2075,6 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -2284,7 +2284,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go index 00f1edada2f..f3133cc14ad 100644 --- a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -4,24 +4,41 @@ import ( "context" "database/sql" "fmt" + "regexp" "strconv" "strings" + "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/go-logr/logr" - // PostreSQL drive required for this scaler - _ "github.com/jackc/pgx/v5/stdlib" + _ "github.com/jackc/pgx/v5/stdlib" // PostreSQL drive required for this scaler v2 "k8s.io/api/autoscaling/v2" "k8s.io/metrics/pkg/apis/external_metrics" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/scalers/azure" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" kedautil "github.com/kedacore/keda/v2/pkg/util" ) +const ( + // Azure AD resource ID for Azure Database for PostgreSQL is https://ossrdbms-aad.database.windows.net + // https://learn.microsoft.com/en-us/azure/postgresql/single-server/how-to-connect-with-managed-identity + azureDatabasePostgresResource = "https://ossrdbms-aad.database.windows.net/.default" +) + +var ( + passwordConnPattern = regexp.MustCompile(`%PASSWORD%`) +) + type postgreSQLScaler struct { - metricType v2.MetricTargetType - metadata *postgreSQLMetadata - connection *sql.DB - logger logr.Logger + metricType v2.MetricTargetType + metadata *postgreSQLMetadata + connection *sql.DB + podIdentity kedav1alpha1.AuthPodIdentity + logger logr.Logger } type postgreSQLMetadata struct { @@ -30,10 +47,16 @@ type postgreSQLMetadata struct { connection string query string triggerIndex int + azureAuthContext azureAuthContext +} + +type azureAuthContext struct { + cred *azidentity.ChainedTokenCredential + token *azcore.AccessToken } // NewPostgreSQLScaler creates a new postgreSQL scaler -func NewPostgreSQLScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { +func NewPostgreSQLScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %w", err) @@ -41,43 +64,46 @@ func NewPostgreSQLScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { logger := InitializeLogger(config, "postgresql_scaler") - meta, err := parsePostgreSQLMetadata(config) + 
meta, podIdentity, err := parsePostgreSQLMetadata(logger, config) if err != nil { return nil, fmt.Errorf("error parsing postgreSQL metadata: %w", err) } - conn, err := getConnection(meta, logger) + conn, err := getConnection(ctx, meta, podIdentity, logger) if err != nil { return nil, fmt.Errorf("error establishing postgreSQL connection: %w", err) } return &postgreSQLScaler{ - metricType: metricType, - metadata: meta, - connection: conn, - logger: logger, + metricType: metricType, + metadata: meta, + connection: conn, + podIdentity: podIdentity, + logger: logger, }, nil } -func parsePostgreSQLMetadata(config *scalersconfig.ScalerConfig) (*postgreSQLMetadata, error) { +func parsePostgreSQLMetadata(logger logr.Logger, config *scalersconfig.ScalerConfig) (*postgreSQLMetadata, kedav1alpha1.AuthPodIdentity, error) { meta := postgreSQLMetadata{} + authPodIdentity := kedav1alpha1.AuthPodIdentity{} + if val, ok := config.TriggerMetadata["query"]; ok { meta.query = val } else { - return nil, fmt.Errorf("no query given") + return nil, authPodIdentity, fmt.Errorf("no query given") } if val, ok := config.TriggerMetadata["targetQueryValue"]; ok { targetQueryValue, err := strconv.ParseFloat(val, 64) if err != nil { - return nil, fmt.Errorf("queryValue parsing error %w", err) + return nil, authPodIdentity, fmt.Errorf("queryValue parsing error %w", err) } meta.targetQueryValue = targetQueryValue } else { if config.AsMetricSource { meta.targetQueryValue = 0 } else { - return nil, fmt.Errorf("no targetQueryValue given") + return nil, authPodIdentity, fmt.Errorf("no targetQueryValue given") } } @@ -85,65 +111,103 @@ func parsePostgreSQLMetadata(config *scalersconfig.ScalerConfig) (*postgreSQLMet if val, ok := config.TriggerMetadata["activationTargetQueryValue"]; ok { activationTargetQueryValue, err := strconv.ParseFloat(val, 64) if err != nil { - return nil, fmt.Errorf("activationTargetQueryValue parsing error %w", err) + return nil, authPodIdentity, fmt.Errorf("activationTargetQueryValue parsing error %w", err) } meta.activationTargetQueryValue = activationTargetQueryValue } - switch { - case config.AuthParams["connection"] != "": - meta.connection = config.AuthParams["connection"] - case config.TriggerMetadata["connectionFromEnv"] != "": - meta.connection = config.ResolvedEnv[config.TriggerMetadata["connectionFromEnv"]] - default: - host, err := GetFromAuthOrMeta(config, "host") - if err != nil { - return nil, err - } + switch config.PodIdentity.Provider { + case "", kedav1alpha1.PodIdentityProviderNone: + switch { + case config.AuthParams["connection"] != "": + meta.connection = config.AuthParams["connection"] + case config.TriggerMetadata["connectionFromEnv"] != "": + meta.connection = config.ResolvedEnv[config.TriggerMetadata["connectionFromEnv"]] + default: + params, err := buildConnArray(config) + if err != nil { + return nil, authPodIdentity, fmt.Errorf("failed to parse fields related to the connection") + } - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, err + var password string + if config.AuthParams["password"] != "" { + password = config.AuthParams["password"] + } else if config.TriggerMetadata["passwordFromEnv"] != "" { + password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] + } + params = append(params, "password="+escapePostgreConnectionParameter(password)) + meta.connection = strings.Join(params, " ") } - - userName, err := GetFromAuthOrMeta(config, "userName") + case kedav1alpha1.PodIdentityProviderAzureWorkload: + params, err := 
buildConnArray(config) if err != nil { - return nil, err + return nil, authPodIdentity, fmt.Errorf("failed to parse fields related to the connection") } - dbName, err := GetFromAuthOrMeta(config, "dbName") + cred, err := azure.NewChainedCredential(logger, config.PodIdentity) if err != nil { - return nil, err + return nil, authPodIdentity, err } + meta.azureAuthContext.cred = cred + authPodIdentity = kedav1alpha1.AuthPodIdentity{Provider: config.PodIdentity.Provider} - sslmode, err := GetFromAuthOrMeta(config, "sslmode") - if err != nil { - return nil, err - } - - var password string - if config.AuthParams["password"] != "" { - password = config.AuthParams["password"] - } else if config.TriggerMetadata["passwordFromEnv"] != "" { - password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]] - } - - // Build connection str - var params []string - params = append(params, "host="+escapePostgreConnectionParameter(host)) - params = append(params, "port="+escapePostgreConnectionParameter(port)) - params = append(params, "user="+escapePostgreConnectionParameter(userName)) - params = append(params, "dbname="+escapePostgreConnectionParameter(dbName)) - params = append(params, "sslmode="+escapePostgreConnectionParameter(sslmode)) - params = append(params, "password="+escapePostgreConnectionParameter(password)) + params = append(params, "%PASSWORD%") meta.connection = strings.Join(params, " ") } meta.triggerIndex = config.TriggerIndex - return &meta, nil + + return &meta, authPodIdentity, nil +} + +func buildConnArray(config *scalersconfig.ScalerConfig) ([]string, error) { + var params []string + + host, err := GetFromAuthOrMeta(config, "host") + if err != nil { + return nil, err + } + + port, err := GetFromAuthOrMeta(config, "port") + if err != nil { + return nil, err + } + + userName, err := GetFromAuthOrMeta(config, "userName") + if err != nil { + return nil, err + } + + dbName, err := GetFromAuthOrMeta(config, "dbName") + if err != nil { + return nil, err + } + + sslmode, err := GetFromAuthOrMeta(config, "sslmode") + if err != nil { + return nil, err + } + params = append(params, "host="+escapePostgreConnectionParameter(host)) + params = append(params, "port="+escapePostgreConnectionParameter(port)) + params = append(params, "user="+escapePostgreConnectionParameter(userName)) + params = append(params, "dbname="+escapePostgreConnectionParameter(dbName)) + params = append(params, "sslmode="+escapePostgreConnectionParameter(sslmode)) + + return params, nil } -func getConnection(meta *postgreSQLMetadata, logger logr.Logger) (*sql.DB, error) { - db, err := sql.Open("pgx", meta.connection) +func getConnection(ctx context.Context, meta *postgreSQLMetadata, podIdentity kedav1alpha1.AuthPodIdentity, logger logr.Logger) (*sql.DB, error) { + connectionString := meta.connection + + if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzureWorkload { + accessToken, err := getAzureAccessToken(ctx, meta, azureDatabasePostgresResource) + if err != nil { + return nil, err + } + newPasswordField := "password=" + escapePostgreConnectionParameter(accessToken) + connectionString = passwordConnPattern.ReplaceAllString(meta.connection, newPasswordField) + } + + db, err := sql.Open("pgx", connectionString) if err != nil { logger.Error(err, fmt.Sprintf("Found error opening postgreSQL: %s", err)) return nil, err @@ -168,6 +232,19 @@ func (s *postgreSQLScaler) Close(context.Context) error { func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error) { var id float64 + + if 
s.podIdentity.Provider == kedav1alpha1.PodIdentityProviderAzureWorkload { + if s.metadata.azureAuthContext.token.ExpiresOn.Before(time.Now()) { + s.logger.Info("The Azure Access Token expired, retrieving a new Azure Access Token and instantiating a new Postgres connection object.") + s.connection.Close() + newConnection, err := getConnection(ctx, s.metadata, s.podIdentity, s.logger) + if err != nil { + return 0, fmt.Errorf("error establishing postgreSQL connection: %w", err) + } + s.connection = newConnection + } + } + err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&id) if err != nil { s.logger.Error(err, fmt.Sprintf("could not query postgreSQL: %s", err)) @@ -210,3 +287,18 @@ func escapePostgreConnectionParameter(str string) string { str = strings.ReplaceAll(str, "'", "\\'") return fmt.Sprintf("'%s'", str) } + +func getAzureAccessToken(ctx context.Context, metadata *postgreSQLMetadata, scope string) (string, error) { + accessToken, err := metadata.azureAuthContext.cred.GetToken(ctx, policy.TokenRequestOptions{ + Scopes: []string{ + scope, + }, + }) + if err != nil { + return "", err + } + + metadata.azureAuthContext.token = &accessToken + + return metadata.azureAuthContext.token.Token, nil +} diff --git a/pkg/scalers/postgresql_scaler_test.go b/pkg/scalers/postgresql_scaler_test.go index 02e9fe9d157..3f79d3a4319 100644 --- a/pkg/scalers/postgresql_scaler_test.go +++ b/pkg/scalers/postgresql_scaler_test.go @@ -6,6 +6,7 @@ import ( "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) @@ -45,11 +46,11 @@ var postgreSQLMetricIdentifiers = []postgreSQLMetricIdentifier{ func TestPosgresSQLGetMetricSpecForScaling(t *testing.T) { for _, testData := range postgreSQLMetricIdentifiers { - meta, err := parsePostgreSQLMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authParam, TriggerIndex: testData.scaleIndex}) + meta, _, err := parsePostgreSQLMetadata(logr.Discard(), &scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authParam, TriggerIndex: testData.scaleIndex}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockPostgresSQLScaler := postgreSQLScaler{"", meta, nil, logr.Discard()} + mockPostgresSQLScaler := postgreSQLScaler{"", meta, nil, kedav1alpha1.AuthPodIdentity{}, logr.Discard()} metricSpec := mockPostgresSQLScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name @@ -79,7 +80,26 @@ var testPostgreSQLConnectionstring = []postgreSQLConnectionStringTestData{ func TestPosgresSQLConnectionStringGeneration(t *testing.T) { for _, testData := range testPostgreSQLConnectionstring { - meta, err := parsePostgreSQLMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParam, TriggerIndex: 0}) + meta, _, err := parsePostgreSQLMetadata(logr.Discard(), &scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParam, TriggerIndex: 0}) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + + if meta.connection != testData.connectionString { + t.Errorf("Error generating connectionString, expected '%s' and get '%s'", testData.connectionString, meta.connection) + } + } +} + +var 
testPodIdentityAzureWorkloadPostgreSQLConnectionstring = []postgreSQLConnectionStringTestData{ + // from meta + {metadata: map[string]string{"query": "test_query", "targetQueryValue": "5", "host": "localhost", "port": "1234", "dbName": "testDb", "userName": "user", "sslmode": "required"}, connectionString: "host=localhost port=1234 user=user dbname=testDb sslmode=required %PASSWORD%"}, +} + +func TestPodIdentityAzureWorkloadPosgresSQLConnectionStringGeneration(t *testing.T) { + identityID := "IDENTITY_ID_CORRESPONDING_TO_USERNAME_FIELD" + for _, testData := range testPodIdentityAzureWorkloadPostgreSQLConnectionstring { + meta, _, err := parsePostgreSQLMetadata(logr.Discard(), &scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzureWorkload, IdentityID: &identityID}, AuthParams: testData.authParam, TriggerIndex: 0}) if err != nil { t.Fatal("Could not parse metadata:", err) } @@ -135,7 +155,7 @@ var testPostgresMetadata = []parsePostgresMetadataTestData{ func TestParsePosgresSQLMetadata(t *testing.T) { for _, testData := range testPostgresMetadata { - _, err := parsePostgreSQLMetadata(&scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + _, _, err := parsePostgreSQLMetadata(logr.Discard(), &scalersconfig.ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) if err != nil && !testData.raisesError { t.Error("Expected success but got error", err) } diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index ed1895a6b75..702521f7719 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -218,7 +218,7 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, case "openstack-swift": return scalers.NewOpenstackSwiftScaler(config) case "postgresql": - return scalers.NewPostgreSQLScaler(config) + return scalers.NewPostgreSQLScaler(ctx, config) case "predictkube": return scalers.NewPredictKubeScaler(ctx, config) case "prometheus": diff --git a/tests/scalers/postgresql/azure_postgresql_flex_server_aad_wi/azure_postgresql_flex_server_aad_wi_test.go b/tests/scalers/postgresql/azure_postgresql_flex_server_aad_wi/azure_postgresql_flex_server_aad_wi_test.go new file mode 100644 index 00000000000..0281b337ff2 --- /dev/null +++ b/tests/scalers/postgresql/azure_postgresql_flex_server_aad_wi/azure_postgresql_flex_server_aad_wi_test.go @@ -0,0 +1,244 @@ +//go:build e2e +// +build e2e + +package azure_postgresql_flex_server_aad_wi_test + +import ( + "encoding/base64" + "fmt" + "os" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + pg "github.com/kedacore/keda/v2/tests/scalers/postgresql/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "azure-postgresql-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + secretKey = "postgresql_conn_str" + postgreSQLStatefulSetName = "azure-postgresql" + postgresqlPodName = fmt.Sprintf("%s-0", postgreSQLStatefulSetName) + azurePostgreSQLAdminUsername = os.Getenv("TF_AZURE_POSTGRES_ADMIN_USERNAME") + azurePostgreSQLAdminPassword = os.Getenv("TF_AZURE_POSTGRES_ADMIN_PASSWORD") + azurePostgreSQLFQDN = os.Getenv("TF_AZURE_POSTGRES_FQDN") + azurePostgreSQLDatabase = os.Getenv("TF_AZURE_POSTGRES_DB_NAME") + azurePostgreSQLUamiName = os.Getenv("TF_AZURE_IDENTITY_1_NAME") + azurePostgreSQLConnectionString = GetAzureConnectionString(azurePostgreSQLAdminUsername, azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLDatabase) + localPostgreSQLUsername = "test-user" + localPostgreSQLPassword = "test-password" + localPostgreSQLDatabase = "test_db" + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + SecretKey string + PostgreSQLImage string + PostgreSQLStatefulSetName string + AzurePostgreSQLConnectionStringBase64 string + AzurePostgreSQLAdminUsername string + AzurePostgreSQLAdminPassword string + AzurePostgreSQLFQDN string + AzurePostgreSQLDatabase string + AzurePostgreSQLUamiName string + PostgreSQLUsername string + PostgreSQLPassword string + PostgreSQLDatabase string + MinReplicaCount int + MaxReplicaCount int +} + +const ( + azureSecretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + postgresql_conn_str: {{.AzurePostgreSQLConnectionStringBase64}} +` + + azureTriggerAuthTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + podIdentity: + provider: azure-workload +` + + azureScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + triggers: + - type: postgresql + metadata: + host: {{.AzurePostgreSQLFQDN}} + port: "5432" + userName: {{.AzurePostgreSQLUamiName}} + dbName: {{.AzurePostgreSQLDatabase}} + sslmode: require + targetQueryValue: "4" + activationTargetQueryValue: "5" + query: "SELECT CEIL(COUNT(*) / 5) FROM task_instance WHERE state='running' OR state='queued'" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` +) + +func TestPostreSQLScaler(t *testing.T) { + kc := GetKubernetesClient(t) + _, postgreSQLtemplates := getPostgreSQLTemplateData() + _, templates := getTemplateData() + t.Cleanup(func() { + // Delete table on remote Azure Postgres Flexible server + deleteTableSQL := "DROP TABLE IF EXISTS task_instance;" + delOk, delOut, delErrOut, delErr := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, 
testNamespace,
+			fmt.Sprintf("PGPASSWORD=%s psql -h %s -p 5432 -U %s -d %s -c \"%s\"", azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLAdminUsername, azurePostgreSQLDatabase, deleteTableSQL), 60, 3)
+		require.True(t, delOk, "executing a command on PostgreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", delOut, delErrOut, delErr)
+
+		KubectlDeleteMultipleWithTemplate(t, data, templates)
+		DeleteKubernetesResources(t, testNamespace, data, postgreSQLtemplates)
+	})
+
+	// Create kubernetes resources for local PostgreSQL server
+	CreateKubernetesResources(t, kc, testNamespace, data, postgreSQLtemplates)
+
+	require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, postgreSQLStatefulSetName, testNamespace, 1, 60, 3),
+		"replica count should be %d after 3 minutes", 1)
+
+	// Delete table on remote Azure Postgres Flexible server
+	deleteTableSQL := "DROP TABLE IF EXISTS task_instance;"
+	delOk, delOut, delErrOut, delErr := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, testNamespace,
+		fmt.Sprintf("PGPASSWORD=%s psql -h %s -p 5432 -U %s -d %s -c \"%s\"", azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLAdminUsername, azurePostgreSQLDatabase, deleteTableSQL), 60, 3)
+	require.True(t, delOk, "executing a command on PostgreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", delOut, delErrOut, delErr)
+
+	// Create table on remote Azure Postgres Flexible server
+	createTableSQL := "CREATE TABLE IF NOT EXISTS task_instance (id serial PRIMARY KEY,state VARCHAR(10));"
+	ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, testNamespace,
+		fmt.Sprintf(`PGPASSWORD=%s psql -h %s -p 5432 -U %s -d %s -c "%s"`, azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLAdminUsername, azurePostgreSQLDatabase, createTableSQL), 60, 3)
+	require.True(t, ok, "executing a command on PostgreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
+
+	grantPrivilegesSQL := fmt.Sprintf(`GRANT ALL ON task_instance TO \"%s\";`, azurePostgreSQLUamiName)
+	grantOk, grantOut, grantErrOut, grantErr := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, testNamespace,
+		fmt.Sprintf("PGPASSWORD=%s psql -h %s -p 5432 -U %s -d %s -c \"%v\"", azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLAdminUsername, azurePostgreSQLDatabase, grantPrivilegesSQL), 60, 3)
+	require.True(t, grantOk, "executing a command on PostgreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", grantOut, grantErrOut, grantErr)
+
+	// Create kubernetes resources for testing
+	KubectlApplyMultipleWithTemplate(t, data, templates)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", minReplicaCount)
+
+	testActivation(t, kc, data)
+	testScaleOut(t, kc, data)
+	testScaleIn(t, kc)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+	t.Log("--- testing activation ---")
+	KubectlReplaceWithTemplate(t, data, "lowLevelRecordsJobTemplate", pg.LowLevelRecordsJobTemplate)
+
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+	t.Log("--- testing scale out ---")
+	KubectlReplaceWithTemplate(t, data, "insertRecordsJobTemplate", pg.InsertRecordsJobTemplate)
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, 
deploymentName, testNamespace, maxReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale in ---")
+
+	// As Azure Database is slower than an in-cluster database (0 latency vs an external service),
+	// we need to remove the load and manually trigger the scale-in condition in the database
+
+	// Remove the load
+	KubectlDeleteWithTemplate(t, data, "lowLevelRecordsJobTemplate", pg.LowLevelRecordsJobTemplate)
+	KubectlDeleteWithTemplate(t, data, "insertRecordsJobTemplate", pg.InsertRecordsJobTemplate)
+	// Update all the instances
+	updateRecords := "UPDATE task_instance SET state = 'processed';"
+	ok, out, errOut, err := WaitForSuccessfulExecCommandOnSpecificPod(t, postgresqlPodName, testNamespace,
+		fmt.Sprintf(`PGPASSWORD=%s psql -h %s -p 5432 -U %s -d %s -c "%s"`, azurePostgreSQLAdminPassword, azurePostgreSQLFQDN, azurePostgreSQLAdminUsername, azurePostgreSQLDatabase, updateRecords), 60, 3)
+	require.True(t, ok, "executing a command on PostgreSQL Pod should work; Output: %s, ErrorOutput: %s, Error: %s", out, errOut, err)
+
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", minReplicaCount)
+}
+
+var data = templateData{
+	TestNamespace:                         testNamespace,
+	PostgreSQLStatefulSetName:             postgreSQLStatefulSetName,
+	DeploymentName:                        deploymentName,
+	ScaledObjectName:                      scaledObjectName,
+	MinReplicaCount:                       minReplicaCount,
+	MaxReplicaCount:                       maxReplicaCount,
+	TriggerAuthenticationName:             triggerAuthenticationName,
+	SecretName:                            secretName,
+	SecretKey:                             secretKey,
+	PostgreSQLImage:                       pg.PostgresqlImage,
+	AzurePostgreSQLAdminUsername:          azurePostgreSQLAdminUsername,
+	AzurePostgreSQLAdminPassword:          azurePostgreSQLAdminPassword,
+	AzurePostgreSQLDatabase:               azurePostgreSQLDatabase,
+	AzurePostgreSQLFQDN:                   azurePostgreSQLFQDN,
+	AzurePostgreSQLUamiName:               azurePostgreSQLUamiName,
+	AzurePostgreSQLConnectionStringBase64: base64.StdEncoding.EncodeToString([]byte(azurePostgreSQLConnectionString)),
+	PostgreSQLUsername:                    localPostgreSQLUsername,
+	PostgreSQLPassword:                    localPostgreSQLPassword,
+	PostgreSQLDatabase:                    localPostgreSQLDatabase,
+}
+
+func getPostgreSQLTemplateData() (templateData, []Template) {
+	return data, []Template{
+		{Name: "postgreSQLStatefulSetTemplate", Config: pg.PostgreSQLStatefulSetTemplate},
+	}
+}
+
+func getTemplateData() (templateData, []Template) {
+	return data, []Template{
+		{Name: "azureSecretTemplate", Config: azureSecretTemplate},
+		{Name: "deploymentTemplate", Config: pg.DeploymentTemplate},
+		{Name: "azureTriggerAuthenticationTemplate", Config: azureTriggerAuthTemplate},
+		{Name: "azureScaledObjectTemplate", Config: azureScaledObjectTemplate},
+	}
+}
+
+func GetAzureConnectionString(username string, password string, fqdn string, database string) string {
+	return fmt.Sprintf("postgresql://%s:%s@%s:5432/%s?sslmode=require", username, password, fqdn, database)
+}

From 9a55f34b58c20383f46c53988b6f865807bfed83 Mon Sep 17 00:00:00 2001
From: John Kyros
Date: Mon, 29 Jul 2024 04:18:44 -0500
Subject: [PATCH 19/37] Add missing TestNamespace to ScaledJob validation test
 template (#5989)

This scaledjob test template was missing its namespace, which doesn't
generally matter for the test -- the test is just checking if the
webhook works; it doesn't care where the scaledjob ends up. 
Where it does matter is if you happen to run this test suite in a more
restrictive environment where you can't write to the default namespace,
because then you fail with a namespace-related creation error instead of
the expected "no triggers defined in the ScaledObject/ScaledJob" error.

This just adds the namespace to the template so it's just like all the
other ones in the test suite.

Signed-off-by: John Kyros
Signed-off-by: novoselov
---
 .../scaled_job_validation/scaled_job_validation_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/internals/scaled_job_validation/scaled_job_validation_test.go b/tests/internals/scaled_job_validation/scaled_job_validation_test.go
index 62a2680e30f..217c3cc8c85 100644
--- a/tests/internals/scaled_job_validation/scaled_job_validation_test.go
+++ b/tests/internals/scaled_job_validation/scaled_job_validation_test.go
@@ -32,6 +32,7 @@ apiVersion: keda.sh/v1alpha1
 kind: ScaledJob
 metadata:
   name: {{.EmptyTriggersSjName}}
+  namespace: {{.TestNamespace}}
 spec:
   jobTargetRef:
     template:

From c5702b716fb1626c8496be0442fe2815f968dada Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Mon, 29 Jul 2024 11:35:49 +0200
Subject: [PATCH 20/37] chore: Use CNCF runner to execute PR e2e tests (#5992)

Signed-off-by: Jorge Turrado
Signed-off-by: novoselov
---
 .github/workflows/pr-e2e.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml
index 680f1b247eb..7a0207a610a 100644
--- a/.github/workflows/pr-e2e.yml
+++ b/.github/workflows/pr-e2e.yml
@@ -146,7 +146,7 @@ jobs:

   run-test:
     needs: [triage, build-test-images]
-    runs-on: e2e
+    runs-on: equinix-keda-runner
     name: Execute e2e tests
     container: ghcr.io/kedacore/keda-tools:1.21.9
    if: needs.triage.outputs.run-e2e == 'true'
@@ -190,7 +190,7 @@ jobs:
           AWS_RUN_IDENTITY_TESTS: true
           AZURE_RUN_WORKLOAD_IDENTITY_TESTS: true
           GCP_RUN_IDENTITY_TESTS: true
-          ENABLE_OPENTELEMETRY : true
+          ENABLE_OPENTELEMETRY: true
           E2E_IMAGE_TAG: ${{ needs.triage.outputs.image_tag }}
           TEST_CLUSTER_NAME: keda-e2e-cluster-pr
           COMMENT_BODY: ${{ github.event.comment.body }}

From 0764461c7359ce792963968273ac71e897bf0c96 Mon Sep 17 00:00:00 2001
From: Ara Pulido
Date: Mon, 29 Jul 2024 11:44:13 +0200
Subject: [PATCH 21/37] Add option to the Datadog scaler to use the Cluster Agent as proxy (#5496)

* Add new Datadog External scaler to talk to the DCA
Signed-off-by: Ara Pulido
* Add ability to retrieve a metric value from the DCA
Signed-off-by: Ara Pulido
* Use datadogmetric naming convention
Signed-off-by: Ara Pulido
* Merge both Datadog scalers into one
Signed-off-by: Ara Pulido
* Add authMode to the TriggerAuthentication
Signed-off-by: Ara Pulido
* Add unit tests for Datadog scaler with Cluster Agent proxy
Signed-off-by: Ara Pulido
* Fix activation
Signed-off-by: Ara Pulido
* Add E2E tests for the Datadog scaler using the Cluster Agent proxy
Signed-off-by: Ara Pulido
* Fixes after rebase
Signed-off-by: Ara Pulido
* Rearrange Datadog tests
Signed-off-by: Ara Pulido
* Fix linting errors
Signed-off-by: Ara Pulido
* Keep token only auth
Signed-off-by: Ara Pulido
* Remove references to ca metadata
Signed-off-by: Ara Pulido
* remove trailing space
Signed-off-by: Ara Pulido
* Add changelog entry for cluster agent as proxy feature
Signed-off-by: Ara Pulido
* Fix activation parameter for Datadog API
Signed-off-by: Ara Pulido
* Adjust Datadog test values
Signed-off-by: Ara Pulido
* Fix typo in Datadog test
Signed-off-by: Ara Pulido
* Make cluster agent metric server parameter mandatory
Signed-off-by: Ara Pulido * Use only service name and namespace to resolve the Cluster Agent service IP Signed-off-by: Ara Pulido * Fix linting issues Signed-off-by: Ara Pulido --------- Signed-off-by: Ara Pulido Co-authored-by: Jorge Turrado Ferrero Signed-off-by: novoselov --- CHANGELOG.md | 1 + pkg/scalers/datadog_scaler.go | 344 ++++++++++++-- pkg/scalers/datadog_scaler_test.go | 87 +++- .../datadog_api_test.go} | 26 +- .../datadog/datadog_dca/datadog_dca_test.go | 441 ++++++++++++++++++ 5 files changed, 844 insertions(+), 55 deletions(-) rename tests/scalers/datadog/{datadog_test.go => datadog_api/datadog_api_test.go} (94%) create mode 100644 tests/scalers/datadog/datadog_dca/datadog_dca_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c83a8e186a..675644eda99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio Here is an overview of all new **experimental** features: - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +- **Datadog Scaler**: Add support to use the Cluster Agent as source of metrics ([#5355](https://github.com/kedacore/keda/issues/5355)) ### Improvements diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go index c06ede68aeb..aa8b499eaff 100644 --- a/pkg/scalers/datadog_scaler.go +++ b/pkg/scalers/datadog_scaler.go @@ -2,7 +2,10 @@ package scalers import ( "context" + "errors" "fmt" + "io" + "net/http" "regexp" "strconv" "strings" @@ -10,34 +13,61 @@ import ( datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog" "github.com/go-logr/logr" + "github.com/tidwall/gjson" v2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/metrics/pkg/apis/external_metrics" + "github.com/kedacore/keda/v2/pkg/scalers/authentication" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" kedautil "github.com/kedacore/keda/v2/pkg/util" ) type datadogScaler struct { - metadata *datadogMetadata - apiClient *datadog.APIClient - logger logr.Logger + metadata *datadogMetadata + apiClient *datadog.APIClient + httpClient *http.Client + logger logr.Logger + useClusterAgentProxy bool } type datadogMetadata struct { - apiKey string - appKey string - datadogSite string + + // AuthParams Cluster Agent Proxy + datadogNamespace string + datadogMetricsService string + datadogMetricsServicePort int + unsafeSsl bool + + // bearer auth Cluster Agent Proxy + enableBearerAuth bool + bearerToken string + + // TriggerMetadata Cluster Agent Proxy + datadogMetricServiceURL string + datadogMetricName string + datadogMetricNamespace string + activationTargetValue float64 + + // AuthParams Datadog API + apiKey string + appKey string + datadogSite string + + // TriggerMetadata Datadog API query string - queryValue float64 queryAggegrator string activationQueryValue float64 - vType v2.MetricTargetType - metricName string age int timeWindowOffset int lastAvailablePointOffset int - useFiller bool - fillValue float64 + + // TriggerMetadata Common + hpaMetricName string + fillValue float64 + targetValue float64 + useFiller bool + vType v2.MetricTargetType } const maxString = "max" @@ -53,19 +83,42 @@ func init() { func NewDatadogScaler(ctx context.Context, config *scalersconfig.ScalerConfig) (Scaler, error) { logger := InitializeLogger(config, "datadog_scaler") - meta, err := parseDatadogMetadata(config, logger) - if err != nil { - return nil, fmt.Errorf("error parsing Datadog metadata: %w", err) + var useClusterAgentProxy bool + var meta *datadogMetadata + var 
err error + var apiClient *datadog.APIClient + var httpClient *http.Client + + if val, ok := config.TriggerMetadata["useClusterAgentProxy"]; ok { + useClusterAgentProxy, err = strconv.ParseBool(val) + if err != nil { + return nil, fmt.Errorf("error parsing useClusterAgentProxy: %w", err) + } } - apiClient, err := newDatadogConnection(ctx, meta, config) - if err != nil { - return nil, fmt.Errorf("error establishing Datadog connection: %w", err) + if useClusterAgentProxy { + meta, err = parseDatadogClusterAgentMetadata(config, logger) + if err != nil { + return nil, fmt.Errorf("error parsing Datadog metadata: %w", err) + } + httpClient = kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, meta.unsafeSsl) + } else { + meta, err = parseDatadogAPIMetadata(config, logger) + if err != nil { + return nil, fmt.Errorf("error parsing Datadog metadata: %w", err) + } + apiClient, err = newDatadogAPIConnection(ctx, meta, config) + if err != nil { + return nil, fmt.Errorf("error establishing Datadog connection: %w", err) + } } + return &datadogScaler{ - metadata: meta, - apiClient: apiClient, - logger: logger, + metadata: meta, + apiClient: apiClient, + httpClient: httpClient, + logger: logger, + useClusterAgentProxy: useClusterAgentProxy, }, nil } @@ -79,7 +132,17 @@ func parseDatadogQuery(q string) (bool, error) { return true, nil } -func parseDatadogMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*datadogMetadata, error) { +// buildClusterAgentURL builds the URL for the Cluster Agent Metrics API service +func buildClusterAgentURL(datadogMetricsService, datadogNamespace string, datadogMetricsServicePort int) string { + return fmt.Sprintf("https://%s.%s:%d/apis/external.metrics.k8s.io/v1beta1", datadogMetricsService, datadogNamespace, datadogMetricsServicePort) +} + +// buildMetricURL builds the URL for the Datadog metric +func buildMetricURL(datadogClusterAgentURL, datadogMetricNamespace, datadogMetricName string) string { + return fmt.Sprintf("%s/namespaces/%s/%s", datadogClusterAgentURL, datadogMetricNamespace, datadogMetricName) +} + +func parseDatadogAPIMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*datadogMetadata, error) { meta := datadogMetadata{} if val, ok := config.TriggerMetadata["age"]; ok { @@ -137,17 +200,23 @@ func parseDatadogMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger return nil, fmt.Errorf("no query given") } - if val, ok := config.TriggerMetadata["queryValue"]; ok { - queryValue, err := strconv.ParseFloat(val, 64) + if val, ok := config.TriggerMetadata["targetValue"]; ok { + targetValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("targetValue parsing error %w", err) + } + meta.targetValue = targetValue + } else if val, ok := config.TriggerMetadata["queryValue"]; ok { + targetValue, err := strconv.ParseFloat(val, 64) if err != nil { return nil, fmt.Errorf("queryValue parsing error %w", err) } - meta.queryValue = queryValue + meta.targetValue = targetValue } else { if config.AsMetricSource { - meta.queryValue = 0 + meta.targetValue = 0 } else { - return nil, fmt.Errorf("no queryValue given") + return nil, fmt.Errorf("no targetValue or queryValue given") } } @@ -223,14 +292,140 @@ func parseDatadogMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger meta.datadogSite = siteVal - metricName := meta.query[0:strings.Index(meta.query, "{")] - meta.metricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("datadog-%s", metricName))) + hpaMetricName 
:= meta.query[0:strings.Index(meta.query, "{")] + meta.hpaMetricName = GenerateMetricNameWithIndex(config.TriggerIndex, kedautil.NormalizeString(fmt.Sprintf("datadog-%s", hpaMetricName))) + + return &meta, nil +} + +func parseDatadogClusterAgentMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (*datadogMetadata, error) { + meta := datadogMetadata{} + + if val, ok := config.AuthParams["datadogNamespace"]; ok { + meta.datadogNamespace = val + } else { + return nil, fmt.Errorf("no datadogNamespace key given") + } + + if val, ok := config.AuthParams["datadogMetricsService"]; ok { + meta.datadogMetricsService = val + } else { + return nil, fmt.Errorf("no datadogMetricsService key given") + } + + if val, ok := config.AuthParams["datadogMetricsServicePort"]; ok { + port, err := strconv.Atoi(val) + if err != nil { + return nil, fmt.Errorf("datadogMetricServicePort parsing error %w", err) + } + meta.datadogMetricsServicePort = port + } else { + meta.datadogMetricsServicePort = 8443 + } + + meta.datadogMetricServiceURL = buildClusterAgentURL(meta.datadogMetricsService, meta.datadogNamespace, meta.datadogMetricsServicePort) + + meta.unsafeSsl = false + if val, ok := config.AuthParams["unsafeSsl"]; ok { + unsafeSsl, err := strconv.ParseBool(val) + if err != nil { + return nil, fmt.Errorf("error parsing unsafeSsl: %w", err) + } + meta.unsafeSsl = unsafeSsl + } + + if val, ok := config.TriggerMetadata["datadogMetricName"]; ok { + meta.datadogMetricName = val + } else { + return nil, fmt.Errorf("no datadogMetricName key given") + } + + if val, ok := config.TriggerMetadata["datadogMetricNamespace"]; ok { + meta.datadogMetricNamespace = val + } else { + return nil, fmt.Errorf("no datadogMetricNamespace key given") + } + + meta.hpaMetricName = "datadogmetric@" + meta.datadogMetricNamespace + ":" + meta.datadogMetricName + + if val, ok := config.TriggerMetadata["targetValue"]; ok { + targetValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("targetValue parsing error %w", err) + } + meta.targetValue = targetValue + } else { + if config.AsMetricSource { + meta.targetValue = 0 + } else { + return nil, fmt.Errorf("no targetValue given") + } + } + + meta.activationTargetValue = 0 + if val, ok := config.TriggerMetadata["activationTargetValue"]; ok { + activationTargetValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("activationTargetValue parsing error %w", err) + } + meta.activationTargetValue = activationTargetValue + } + + if val, ok := config.TriggerMetadata["metricUnavailableValue"]; ok { + fillValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return nil, fmt.Errorf("metricUnavailableValue parsing error %w", err) + } + meta.fillValue = fillValue + meta.useFiller = true + } + + if val, ok := config.TriggerMetadata["type"]; ok { + logger.V(0).Info("trigger.metadata.type is deprecated in favor of trigger.metricType") + if config.MetricType != "" { + return nil, fmt.Errorf("only one of trigger.metadata.type or trigger.metricType should be defined") + } + val = strings.ToLower(val) + switch val { + case avgString: + meta.vType = v2.AverageValueMetricType + case "global": + meta.vType = v2.ValueMetricType + default: + return nil, fmt.Errorf("type has to be global or average") + } + } else { + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + meta.vType = metricType + } + + authMode, ok := config.AuthParams["authMode"] + // no 
authMode specified
+	if !ok {
+		return &meta, nil
+	}
+
+	authType := authentication.Type(strings.TrimSpace(authMode))
+	switch authType {
+	case authentication.BearerAuthType:
+		if len(config.AuthParams["token"]) == 0 {
+			return nil, errors.New("no token provided")
+		}
+
+		meta.bearerToken = config.AuthParams["token"]
+		meta.enableBearerAuth = true
+	default:
+		return nil, fmt.Errorf("err incorrect value for authMode is given: %s", authMode)
+	}

 	return &meta, nil
 }

-// newDatadogConnection tests a connection to the Datadog API
-func newDatadogConnection(ctx context.Context, meta *datadogMetadata, config *scalersconfig.ScalerConfig) (*datadog.APIClient, error) {
+// newDatadogAPIConnection tests a connection to the Datadog API
+func newDatadogAPIConnection(ctx context.Context, meta *datadogMetadata, config *scalersconfig.ScalerConfig) (*datadog.APIClient, error) {
 	ctx = context.WithValue(
 		ctx,
 		datadog.ContextAPIKeys,
@@ -373,13 +568,73 @@ func (s *datadogScaler) getQueryResult(ctx context.Context) (float64, error) {
 	}
 }

+func (s *datadogScaler) getDatadogMetricValue(req *http.Request) (float64, error) {
+	resp, err := s.httpClient.Do(req)
+	if err != nil {
+		return 0, fmt.Errorf("error getting metric value: %w", err)
+	}
+
+	defer resp.Body.Close()
+	body, _ := io.ReadAll(resp.Body)
+
+	if resp.StatusCode != http.StatusOK {
+		r := gjson.GetBytes(body, "message")
+		if r.Type == gjson.String {
+			return 0, fmt.Errorf("error getting metric value: %s", r.String())
+		}
+	}
+
+	valueLocation := "items.0.value"
+	r := gjson.GetBytes(body, valueLocation)
+	errorMsg := "the metric value must be of type number or a string representing a Quantity got: '%s'"
+
+	if r.Type == gjson.String {
+		v, err := resource.ParseQuantity(r.String())
+		if err != nil {
+			return 0, fmt.Errorf(errorMsg, r.String())
+		}
+		return v.AsApproximateFloat64(), nil
+	}
+	if r.Type != gjson.Number {
+		return 0, fmt.Errorf(errorMsg, r.Type.String())
+	}
+	return r.Num, nil
+}
+
+func (s *datadogScaler) getDatadogClusterAgentHTTPRequest(ctx context.Context, url string) (*http.Request, error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Only the bearer header differs between the authenticated and the
+	// unauthenticated case; the request itself is always returned.
+	if s.metadata.enableBearerAuth {
+		req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", s.metadata.bearerToken))
+	}
+
+	return req, nil
+}
+
 // GetMetricSpecForScaling returns the MetricSpec for the Horizontal Pod Autoscaler
 func (s *datadogScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
 	externalMetric := &v2.ExternalMetricSource{
 		Metric: v2.MetricIdentifier{
-			Name: s.metadata.metricName,
+			Name: s.metadata.hpaMetricName,
 		},
-		Target: GetMetricTargetMili(s.metadata.vType, s.metadata.queryValue),
+		Target: GetMetricTargetMili(s.metadata.vType, s.metadata.targetValue),
 	}
 	metricSpec := v2.MetricSpec{
 		External: externalMetric, Type: externalMetricType,
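The two helpers just added, getDatadogMetricValue and getDatadogClusterAgentHTTPRequest, boil down to one authenticated GET against the Cluster Agent's external metrics endpoint followed by a JSON lookup of items.0.value. A rough standalone sketch of that round trip using only the standard library; the function name and parameter wiring here are illustrative assumptions, not part of the patch:

func fetchClusterAgentMetric(ctx context.Context, client *http.Client, baseURL, namespace, metricName, bearerToken string) ([]byte, error) {
	// baseURL has the shape https://<service>.<namespace>:<port>/apis/external.metrics.k8s.io/v1beta1
	url := fmt.Sprintf("%s/namespaces/%s/%s", baseURL, namespace, metricName)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	if bearerToken != "" {
		req.Header.Add("Authorization", "Bearer "+bearerToken)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// The caller extracts items.0.value from this payload, as getDatadogMetricValue does.
	return io.ReadAll(resp.Body)
}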
@@ -389,14 +644,33 @@ func (s *datadogScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec

 // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *datadogScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
-	num, err := s.getQueryResult(ctx)
+	var metric external_metrics.ExternalMetricValue
+	var num float64
+	var err error
+
+	if s.useClusterAgentProxy {
+		url := buildMetricURL(s.metadata.datadogMetricServiceURL, s.metadata.datadogMetricNamespace, s.metadata.hpaMetricName)
+
+		req, err := s.getDatadogClusterAgentHTTPRequest(ctx, url)
+		if (err != nil) || (req == nil) {
+			return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error generating http request: %w", err)
+		}
+
+		num, err = s.getDatadogMetricValue(req)
+		if err != nil {
+			return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error getting metric value: %w", err)
+		}
+
+		metric = GenerateMetricInMili(metricName, num)
+		return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationTargetValue, nil
+	}
+	num, err = s.getQueryResult(ctx)
 	if err != nil {
 		s.logger.Error(err, "error getting metrics from Datadog")
 		return []external_metrics.ExternalMetricValue{}, false, fmt.Errorf("error getting metrics from Datadog: %w", err)
 	}
-	metric := GenerateMetricInMili(metricName, num)
-
+	metric = GenerateMetricInMili(metricName, num)
 	return []external_metrics.ExternalMetricValue{metric}, num > s.metadata.activationQueryValue, nil
 }
diff --git a/pkg/scalers/datadog_scaler_test.go b/pkg/scalers/datadog_scaler_test.go
index 41e32c7f0eb..bf12cadc60d 100644
--- a/pkg/scalers/datadog_scaler_test.go
+++ b/pkg/scalers/datadog_scaler_test.go
@@ -16,8 +16,16 @@ type datadogQueries struct {
 	isError bool
 }

+type datadogScalerType int64
+
+const (
+	apiType datadogScalerType = iota
+	clusterAgentType
+)
+
 type datadogMetricIdentifier struct {
 	metadataTestData *datadogAuthMetadataTestData
+	typeOfScaler     datadogScalerType
 	triggerIndex     int
 	name             string
 }
@@ -90,7 +98,29 @@ func TestDatadogScalerParseQueries(t *testing.T) {
 	}
 }

-var testDatadogMetadata = []datadogAuthMetadataTestData{
+var testDatadogClusterAgentMetadata = []datadogAuthMetadataTestData{
+	{"", map[string]string{}, map[string]string{}, true},
+
+	// all properly formed
+	{"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "datadogMetricsServicePort": "8080", "unsafeSsl": "true", "authMode": "bearer"}, false},
+	// Default Datadog service name and port
+	{"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "unsafeSsl": "true", "authMode": "bearer"}, false},
+
+	// both metadata type and trigger type
+	{v2.AverageValueMetricType, map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "unsafeSsl": "true", "authMode": "bearer"}, true},
+	// missing DatadogMetric name
+	{"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricNamespace": "default", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "unsafeSsl": "true", "authMode": "bearer"}, true},
+	// missing DatadogMetric namespace
+	{"",
map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "unsafeSsl": "true", "authMode": "bearer"}, true}, + // wrong port type + {"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "2", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "datadogMetricsServicePort": "notanint", "unsafeSsl": "true", "authMode": "bearer"}, true}, + // wrong targetValue type + {"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "notanint", "type": "global"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "datadogMetricsServicePort": "8080", "unsafeSsl": "true", "authMode": "bearer"}, true}, + // wrong type + {"", map[string]string{"useClusterAgentProxy": "true", "datadogMetricName": "nginx-hits", "datadogMetricNamespace": "default", "targetValue": "2", "type": "notatype"}, map[string]string{"token": "token", "datadogNamespace": "datadog", "datadogMetricsService": "datadog-cluster-agent-metrics-api", "datadogMetricsServicePort": "8080", "unsafeSsl": "true", "authMode": "bearer"}, true}, +} + +var testDatadogAPIMetadata = []datadogAuthMetadataTestData{ {"", map[string]string{}, map[string]string{}, true}, // all properly formed @@ -135,9 +165,22 @@ var testDatadogMetadata = []datadogAuthMetadataTestData{ {"", map[string]string{"query": "sum:trace.redis.command.hits.as_count()", "queryValue": "7"}, map[string]string{}, true}, } -func TestDatadogScalerAuthParams(t *testing.T) { - for _, testData := range testDatadogMetadata { - _, err := parseDatadogMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, MetricType: testData.metricType}, logr.Discard()) +func TestDatadogScalerAPIAuthParams(t *testing.T) { + for _, testData := range testDatadogAPIMetadata { + _, err := parseDatadogAPIMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, MetricType: testData.metricType}, logr.Discard()) + + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Error("Expected error but got success") + } + } +} + +func TestDatadogScalerClusterAgentAuthParams(t *testing.T) { + for _, testData := range testDatadogClusterAgentMetadata { + _, err := parseDatadogClusterAgentMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, MetricType: testData.metricType}, logr.Discard()) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -149,19 +192,29 @@ func TestDatadogScalerAuthParams(t *testing.T) { } var datadogMetricIdentifiers = []datadogMetricIdentifier{ - {&testDatadogMetadata[1], 0, "s0-datadog-sum-trace-redis-command-hits"}, - {&testDatadogMetadata[1], 1, "s1-datadog-sum-trace-redis-command-hits"}, + {&testDatadogAPIMetadata[1], apiType, 0, "s0-datadog-sum-trace-redis-command-hits"}, + {&testDatadogAPIMetadata[1], apiType, 1, "s1-datadog-sum-trace-redis-command-hits"}, + {&testDatadogClusterAgentMetadata[1], clusterAgentType, 0, 
"datadogmetric@default:nginx-hits"}, } func TestDatadogGetMetricSpecForScaling(t *testing.T) { + var err error + var meta *datadogMetadata + for _, testData := range datadogMetricIdentifiers { - meta, err := parseDatadogMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex, MetricType: testData.metadataTestData.metricType}, logr.Discard()) + if testData.typeOfScaler == apiType { + meta, err = parseDatadogAPIMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex, MetricType: testData.metadataTestData.metricType}, logr.Discard()) + } else { + meta, err = parseDatadogClusterAgentMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex, MetricType: testData.metadataTestData.metricType}, logr.Discard()) + } if err != nil { t.Fatal("Could not parse metadata:", err) } + mockDatadogScaler := datadogScaler{ - metadata: meta, - apiClient: nil, + metadata: meta, + apiClient: nil, + httpClient: nil, } metricSpec := mockDatadogScaler.GetMetricSpecForScaling(context.Background()) @@ -171,3 +224,19 @@ func TestDatadogGetMetricSpecForScaling(t *testing.T) { } } } + +func TestBuildClusterAgentURL(t *testing.T) { + // Test valid inputs + url := buildClusterAgentURL("datadogMetricsService", "datadogNamespace", 8080) + if url != "https://datadogMetricsService.datadogNamespace:8080/apis/external.metrics.k8s.io/v1beta1" { + t.Error("Expected https://datadogMetricsService.datadogNamespace:8080/apis/external.metrics.k8s.io/v1beta1, got ", url) + } +} + +func TestBuildMetricURL(t *testing.T) { + // Test valid inputs + url := buildMetricURL("https://localhost:8080/apis/datadoghq.com/v1alpha1", "datadogMetricNamespace", "datadogMetricName") + if url != "https://localhost:8080/apis/datadoghq.com/v1alpha1/namespaces/datadogMetricNamespace/datadogMetricName" { + t.Error("Expected https://localhost:8080/apis/datadoghq.com/v1alpha1/namespaces/datadogMetricNamespace/datadogMetricName, got ", url) + } +} diff --git a/tests/scalers/datadog/datadog_test.go b/tests/scalers/datadog/datadog_api/datadog_api_test.go similarity index 94% rename from tests/scalers/datadog/datadog_test.go rename to tests/scalers/datadog/datadog_api/datadog_api_test.go index 99c46dc2366..f4125797291 100644 --- a/tests/scalers/datadog/datadog_test.go +++ b/tests/scalers/datadog/datadog_api/datadog_api_test.go @@ -1,7 +1,7 @@ //go:build e2e // +build e2e -package datadog_test +package datadog_api_test import ( "encoding/base64" @@ -28,7 +28,7 @@ var ( testNamespace = fmt.Sprintf("%s-ns", testName) deploymentName = fmt.Sprintf("%s-deployment", testName) monitoredDeploymentName = fmt.Sprintf("%s-monitored-deployment", testName) - servciceName = fmt.Sprintf("%s-service", testName) + serviceName = fmt.Sprintf("%s-service", testName) triggerAuthName = fmt.Sprintf("%s-ta", testName) scaledObjectName = fmt.Sprintf("%s-so", testName) secretName = fmt.Sprintf("%s-secret", testName) @@ -46,7 +46,7 @@ type templateData struct { TestNamespace string DeploymentName string MonitoredDeploymentName string - ServciceName string + ServiceName string ScaledObjectName string TriggerAuthName string SecretName string @@ -174,7 +174,7 @@ spec: apiVersion: v1 kind: Service metadata: - name: {{.ServciceName}} + name: 
{{.ServiceName}} namespace: {{.TestNamespace}} spec: ports: @@ -213,7 +213,7 @@ spec: triggers: - type: datadog metadata: - query: "avg:nginx.net.request_per_s{cluster_name:{{.KuberneteClusterName}}}" + query: "avg:nginx.net.request_per_s{cluster_name:{{.KuberneteClusterName}}, kube_namespace:{{.TestNamespace}}}" queryValue: "2" activationQueryValue: "3" age: "120" @@ -231,7 +231,7 @@ spec: - image: busybox name: test command: ["/bin/sh"] - args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServciceName}}/; sleep 0.5; done"]` + args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServiceName}}/; sleep 5; done"]` heavyLoadTemplate = `apiVersion: v1 kind: Pod @@ -243,10 +243,10 @@ spec: - image: busybox name: test command: ["/bin/sh"] - args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServciceName}}/; sleep 0.1; done"]` + args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServiceName}}/; sleep 0.1; done"]` ) -func TestDatadogScaler(t *testing.T) { +func TestDatadogScalerAPI(t *testing.T) { // setup t.Log("--- setting up ---") require.NotEmpty(t, datadogAppKey, "DATADOG_APP_KEY env variable is required for datadog tests") @@ -261,11 +261,16 @@ func TestDatadogScaler(t *testing.T) { // install datadog CreateNamespace(t, kc, testNamespace) - installDatadog(t) // Create kubernetes resources KubectlApplyMultipleWithTemplate(t, data, templates) + // Deploy Datadog Agent + installDatadog(t) + + t.Log("--- creating ScaledObject ---") + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 3), "replica count should be %d after 3 minutes", minReplicaCount) @@ -318,7 +323,7 @@ func getTemplateData() (templateData, []Template) { TestNamespace: testNamespace, DeploymentName: deploymentName, MonitoredDeploymentName: monitoredDeploymentName, - ServciceName: servciceName, + ServiceName: serviceName, TriggerAuthName: triggerAuthName, ScaledObjectName: scaledObjectName, SecretName: secretName, @@ -336,6 +341,5 @@ func getTemplateData() (templateData, []Template) { {Name: "serviceTemplate", Config: serviceTemplate}, {Name: "deploymentTemplate", Config: deploymentTemplate}, {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, - {Name: "scaledObjectTemplate", Config: scaledObjectTemplate}, } } diff --git a/tests/scalers/datadog/datadog_dca/datadog_dca_test.go b/tests/scalers/datadog/datadog_dca/datadog_dca_test.go new file mode 100644 index 00000000000..66512515484 --- /dev/null +++ b/tests/scalers/datadog/datadog_dca/datadog_dca_test.go @@ -0,0 +1,441 @@ +//go:build e2e +// +build e2e + +package datadog_dca_test + +import ( + "encoding/base64" + "fmt" + "os" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "datadog-dca-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + dcaServiceAccount = fmt.Sprintf("%s-sa", testName) + dcaClusterRole = fmt.Sprintf("%s-cr", testName) + dcaClusterRoleBinding = fmt.Sprintf("%s-crb", testName) + dcaSAToken = fmt.Sprintf("%s-sa-token", testName) + datadogConfigName = fmt.Sprintf("%s-datadog-config", testName) + datadogMetricName = fmt.Sprintf("%s-datadog-metric", testName) + + deploymentName = fmt.Sprintf("%s-deployment", testName) + monitoredDeploymentName = fmt.Sprintf("%s-monitored-deployment", testName) + serviceName = fmt.Sprintf("%s-service", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + configName = fmt.Sprintf("%s-config", testName) + datadogAPIKey = os.Getenv("DATADOG_API_KEY") + datadogAppKey = os.Getenv("DATADOG_APP_KEY") + datadogSite = os.Getenv("DATADOG_SITE") + datadogHelmRepo = "https://helm.datadoghq.com" + kuberneteClusterName = "keda-datadog-cluster" + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DcaServiceAccount string + DcaClusterRole string + DcaClusterRoleBinding string + DcaServiceAccountToken string + DatadogConfigName string + DatadogConfigNamespace string + DatadogConfigMetricsService string + DatadogConfigUnsafeSSL string + DatadogConfigAuthMode string + DatadogMetricName string + + DeploymentName string + MonitoredDeploymentName string + ServiceName string + ScaledObjectName string + TriggerAuthName string + SecretName string + ConfigName string + DatadogAPIKey string + DatadogAppKey string + DatadogSite string + KuberneteClusterName string + MinReplicaCount string + MaxReplicaCount string +} + +const ( + datadogMetricTemplate = `apiVersion: datadoghq.com/v1alpha1 +kind: DatadogMetric +metadata: + name: {{.DatadogMetricName}} + namespace: {{.TestNamespace}} + annotations: + external-metrics.datadoghq.com/always-active: "true" +spec: + query: "avg:nginx.net.request_per_s{cluster_name:{{.KuberneteClusterName}}, kube_namespace:{{.TestNamespace}}}" +` + + dcaServiceAccountTemplate = `apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{.DcaServiceAccount}} + namespace: {{.TestNamespace}} +` + dcaClusterRoleTemplate = `apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{.DcaClusterRole}} +rules: +- apiGroups: + - external.metrics.k8s.io + resources: + - '*' + verbs: ["get", "watch", "list"] +` + dcaClusterRoleBindingTemplate = `apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{.DcaClusterRoleBinding}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{.DcaClusterRole}} +subjects: +- kind: ServiceAccount + name: {{.DcaServiceAccount}} + namespace: {{.TestNamespace}} +` + dcaServiceAccountTokenTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.DcaServiceAccountToken}} + namespace: {{.TestNamespace}} + annotations: + kubernetes.io/service-account.name: {{.DcaServiceAccount}} +type: kubernetes.io/service-account-token +` + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + apiKey: {{.DatadogAPIKey}} + appKey: {{.DatadogAppKey}} + datadogSite: {{.DatadogSite}} +` + datadogConfigTemplate = `apiVersion: v1 +kind: Secret 
+metadata: + name: {{.DatadogConfigName}} + namespace: {{.TestNamespace}} +data: + datadogNamespace: {{.DatadogConfigNamespace}} + datadogMetricsService: {{.DatadogConfigMetricsService}} + datadogUnsafeSSL: {{.DatadogConfigUnsafeSSL}} + datadogAuthMode: {{.DatadogConfigAuthMode}} +` + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: token + name: {{.DcaServiceAccountToken}} + key: token + - parameter: datadogNamespace + name: {{.DatadogConfigName}} + key: datadogNamespace + - parameter: datadogMetricsService + name: {{.DatadogConfigName}} + key: datadogMetricsService + - parameter: unsafeSsl + name: {{.DatadogConfigName}} + key: datadogUnsafeSSL + - parameter: authMode + name: {{.DatadogConfigName}} + key: datadogAuthMode +` + configTemplate = `apiVersion: v1 +kind: ConfigMap +metadata: + name: {{.ConfigName}} + namespace: {{.TestNamespace}} +data: + status.conf: | + server { + listen 81; + location /nginx_status { + stub_status on; + } + } +` + deploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: nginx + image: nginxinc/nginx-unprivileged + ports: + - containerPort: 80 +` + monitoredDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: nginx + name: {{.MonitoredDeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + creationTimestamp: null + labels: + app: nginx + annotations: + ad.datadoghq.com/nginx.check_names: '["nginx"]' + ad.datadoghq.com/nginx.init_configs: '[{}]' + ad.datadoghq.com/nginx.instances: | + [ + { + "nginx_status_url":"http://%%host%%:81/nginx_status/" + } + ] + spec: + containers: + - image: nginx + name: nginx + ports: + - containerPort: 80 + - containerPort: 81 + volumeMounts: + - mountPath: /etc/nginx/conf.d/status.conf + subPath: status.conf + readOnly: true + name: "config" + volumes: + - name: "config" + configMap: + name: {{.ConfigName}} +` + serviceTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: {{.ServiceName}} + namespace: {{.TestNamespace}} +spec: + ports: + - name: default + port: 80 + protocol: TCP + targetPort: 80 + - name: status + port: 81 + protocol: TCP + targetPort: 81 + selector: + app: nginx +` + + scaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 1 + cooldownPeriod: 1 + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 10 + triggers: + - type: datadog + metadata: + useClusterAgentProxy: "true" + datadogMetricName: {{.DatadogMetricName}} + datadogMetricNamespace: {{.TestNamespace}} + targetValue: "2" + activationTargetValue: "3" + metricType: "Value" + authenticationRef: + name: {{.TriggerAuthName}} +` + lightLoadTemplate = `apiVersion: v1 +kind: Pod +metadata: + name: fake-light-traffic + namespace: {{.TestNamespace}} +spec: + containers: + - image: busybox + 
name: test + command: ["/bin/sh"] + args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServiceName}}/; sleep 5; done"]` + + heavyLoadTemplate = `apiVersion: v1 +kind: Pod +metadata: + name: fake-heavy-traffic + namespace: {{.TestNamespace}} +spec: + containers: + - image: busybox + name: test + command: ["/bin/sh"] + args: ["-c", "while true; do wget -O /dev/null -o /dev/null http://{{.ServiceName}}/; sleep 0.1; done"]` +) + +func TestDatadogScalerDCA(t *testing.T) { + // setup + t.Log("--- setting up ---") + require.NotEmpty(t, datadogAppKey, "DATADOG_APP_KEY env variable is required for datadog tests") + require.NotEmpty(t, datadogAPIKey, "DATADOG_API_KEY env variable is required for datadog tests") + require.NotEmpty(t, datadogSite, "DATADOG_SITE env variable is required for datadog tests") + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + CreateKubernetesResources(t, kc, testNamespace, data, templates) + installDatadog(t) + + t.Log("--- creating DatadogMetric & ScaledObject ---") + KubectlApplyWithTemplate(t, data, "datadogMetricTemplate", datadogMetricTemplate) + KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 180, 3), + "replica count should be %d after 3 minutes", minReplicaCount) + + // test scaling + testActivation(t, kc, data) + testScaleOut(t, kc, data) + testScaleIn(t, kc, data) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation ---") + KubectlApplyWithTemplate(t, data, "lightLoadTemplate", lightLoadTemplate) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale out ---") + KubectlApplyWithTemplate(t, data, "heavyLoadTemplate", heavyLoadTemplate) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale in ---") + KubectlDeleteWithTemplate(t, data, "lightLoadTemplate", lightLoadTemplate) + KubectlDeleteWithTemplate(t, data, "heavyLoadTemplate", heavyLoadTemplate) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +func installDatadog(t *testing.T) { + t.Log("--- installing datadog ---") + _, err := ExecuteCommand(fmt.Sprintf("helm repo add datadog %s", datadogHelmRepo)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) + _, err = ExecuteCommand("helm repo update") + assert.NoErrorf(t, err, "cannot execute command - %s", err) + _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --set datadog.apiKey=%s --set datadog.appKey=%s --set datadog.site=%s --set datadog.clusterName=%s --set datadog.kubelet.tlsVerify=false --set clusterAgent.metricsProvider.enabled=true --set clusterAgent.metricsProvider.registerAPIService=false --set clusterAgent.metricsProvider.useDatadogMetrics=true --namespace %s --wait %s datadog/datadog`, + datadogAPIKey, + datadogAppKey, + datadogSite, + 
kuberneteClusterName, + testNamespace, + testName)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DcaServiceAccount: dcaServiceAccount, + DcaClusterRole: dcaClusterRole, + DcaClusterRoleBinding: dcaClusterRoleBinding, + DcaServiceAccountToken: dcaSAToken, + DatadogConfigName: datadogConfigName, + DatadogConfigNamespace: base64.StdEncoding.EncodeToString([]byte(testNamespace)), + DatadogConfigMetricsService: base64.StdEncoding.EncodeToString([]byte(testName + "-cluster-agent-metrics-api")), + DatadogConfigUnsafeSSL: base64.StdEncoding.EncodeToString([]byte("true")), + DatadogConfigAuthMode: base64.StdEncoding.EncodeToString([]byte("bearer")), + DatadogMetricName: datadogMetricName, + DeploymentName: deploymentName, + MonitoredDeploymentName: monitoredDeploymentName, + ServiceName: serviceName, + TriggerAuthName: triggerAuthName, + ScaledObjectName: scaledObjectName, + SecretName: secretName, + ConfigName: configName, + DatadogAPIKey: base64.StdEncoding.EncodeToString([]byte(datadogAPIKey)), + DatadogAppKey: base64.StdEncoding.EncodeToString([]byte(datadogAppKey)), + DatadogSite: base64.StdEncoding.EncodeToString([]byte(datadogSite)), + KuberneteClusterName: kuberneteClusterName, + MinReplicaCount: fmt.Sprintf("%v", minReplicaCount), + MaxReplicaCount: fmt.Sprintf("%v", maxReplicaCount), + }, []Template{ + {Name: "secretTemplate", Config: secretTemplate}, + {Name: "dcaServiceAccountTemplate", Config: dcaServiceAccountTemplate}, + {Name: "dcaClusterRoleTemplate", Config: dcaClusterRoleTemplate}, + {Name: "dcaClusterRoleBindingTemplate", Config: dcaClusterRoleBindingTemplate}, + {Name: "dcaServiceAccountTokenTemplate", Config: dcaServiceAccountTokenTemplate}, + {Name: "configTemplate", Config: configTemplate}, + {Name: "datadogConfigTemplate", Config: datadogConfigTemplate}, + {Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate}, + {Name: "serviceTemplate", Config: serviceTemplate}, + {Name: "deploymentTemplate", Config: deploymentTemplate}, + {Name: "monitoredDeploymentTemplate", Config: monitoredDeploymentTemplate}, + } +} From 9cc1663fb7e0192a6cf94831945710d6f97cb20d Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Tue, 30 Jul 2024 11:06:19 +0200 Subject: [PATCH 22/37] chore: Update supported versions (#5993) Signed-off-by: Jorge Turrado Signed-off-by: novoselov --- Makefile | 2 +- pkg/util/welcome.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 890d09a326a..4ee8b59f2df 100644 --- a/Makefile +++ b/Makefile @@ -52,7 +52,7 @@ GO_LDFLAGS="-X=github.com/kedacore/keda/v2/version.GitCommit=$(GIT_COMMIT) -X=gi COSIGN_FLAGS ?= -y -a GIT_HASH=${GIT_COMMIT} -a GIT_VERSION=${VERSION} -a BUILD_DATE=${DATE} # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.28 +ENVTEST_K8S_VERSION = 1.30 # Setting SHELL to bash allows bash commands to be executed by recipes. # This is a requirement for 'setup-envtest.sh' in the test target. 
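The version bumps in this patch move in lockstep: ENVTEST_K8S_VERSION above and the min/maxSupportedVersion pair in pkg/util/welcome.go below. Conceptually the welcome check is just a range test on the cluster's minor version; the following is a minimal illustrative sketch of that idea, where the function name and messages are assumptions for illustration, not the actual pkg/util API:

func versionSupportNote(minorVersion int) string {
	const minSupportedVersion, maxSupportedVersion = 28, 30 // the values this patch moves to
	switch {
	case minorVersion < minSupportedVersion:
		return "cluster runs an older Kubernetes minor than KEDA supports"
	case minorVersion > maxSupportedVersion:
		return "cluster runs a newer Kubernetes minor than KEDA has validated"
	default:
		return "cluster version is within the supported range"
	}
}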
diff --git a/pkg/util/welcome.go b/pkg/util/welcome.go index 680894eb177..4a4e6729d32 100644 --- a/pkg/util/welcome.go +++ b/pkg/util/welcome.go @@ -26,8 +26,8 @@ import ( ) const ( - minSupportedVersion = 27 - maxSupportedVersion = 29 + minSupportedVersion = 28 + maxSupportedVersion = 30 ) func PrintWelcome(logger logr.Logger, kubeVersion K8sVersion, component string) { From 5d58cedaca104587e4eb3471eda6448ee67fcfeb Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Tue, 30 Jul 2024 11:08:54 +0200 Subject: [PATCH 23/37] fix: Revert equinix change (#5999) Signed-off-by: Jorge Turrado Signed-off-by: novoselov --- .github/workflows/pr-e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 7a0207a610a..3f5d44c5e63 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -146,7 +146,7 @@ jobs: run-test: needs: [triage, build-test-images] - runs-on: equinix-keda-runner + runs-on: e2e name: Execute e2e tests container: ghcr.io/kedacore/keda-tools:1.21.9 if: needs.triage.outputs.run-e2e == 'true' From 1ccbe6a2a39d997c3fb00138d82e8d59b6539e53 Mon Sep 17 00:00:00 2001 From: Leonardo D'Ippolito Date: Tue, 30 Jul 2024 11:43:14 +0100 Subject: [PATCH 24/37] Update GetAzureQueueLength in azure storage queue scaler to consider queueLengthStrategy (#5875) Signed-off-by: Leonardo D'Ippolito Signed-off-by: novoselov --- CHANGELOG.md | 1 + pkg/scalers/azure_queue_scaler.go | 54 +++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 675644eda99..278234e0021 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ Here is an overview of all new **experimental** features: ### Improvements +- **Azure queue scaler**: Added new configuration option 'queueLengthStrategy' ([#4478](https://github.com/kedacore/keda/issues/4478)) - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802)) - **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when GCP metric returns no value. 
([#5896](https://github.com/kedacore/keda/issues/5896))
 - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778))

diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go
index 3b4fe1d113d..6f642ec04bf 100644
--- a/pkg/scalers/azure_queue_scaler.go
+++ b/pkg/scalers/azure_queue_scaler.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"strconv"
+	"strings"

 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue"
 	"github.com/go-logr/logr"
@@ -33,10 +34,16 @@ import (
 )

 const (
-	queueLengthMetricName           = "queueLength"
-	activationQueueLengthMetricName = "activationQueueLength"
-	defaultTargetQueueLength        = 5
-	externalMetricType              = "External"
+	queueLengthMetricName                  = "queueLength"
+	activationQueueLengthMetricName        = "activationQueueLength"
+	defaultTargetQueueLength               = 5
+	externalMetricType                     = "External"
+	QueueLengthStrategyAll          string = "all"
+	QueueLengthStrategyVisibleOnly  string = "visibleonly"
+)
+
+var (
+	maxPeekMessages int32 = 32
 )

 type azureQueueScaler struct {
@@ -53,6 +60,7 @@ type azureQueueMetadata struct {
 	connection          string
 	accountName         string
 	endpointSuffix      string
+	queueLengthStrategy string
 	triggerIndex        int
 }

@@ -123,6 +131,17 @@ func parseAzureQueueMetadata(config *scalersconfig.ScalerConfig, logger logr.Log
 		return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no queueName given")
 	}

+	if val, ok := config.TriggerMetadata["queueLengthStrategy"]; ok && val != "" {
+		strategy := strings.ToLower(val)
+		if strategy == QueueLengthStrategyAll || strategy == QueueLengthStrategyVisibleOnly {
+			meta.queueLengthStrategy = strategy
+		} else {
+			return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("invalid queueLengthStrategy %s given", val)
+		}
+	} else {
+		meta.queueLengthStrategy = QueueLengthStrategyAll
+	}
+
 	// If the Use AAD Pod Identity is not present, or set to "none"
 	// then check for connection string
 	switch config.PodIdentity.Provider {
@@ -172,12 +191,35 @@ func (s *azureQueueScaler) GetMetricSpecForScaling(context.Context) []v2.MetricS

 // GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureQueueScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
-	props, err := s.queueClient.GetProperties(ctx, nil)
+	queuelen, err := s.getMessageCount(ctx)
 	if err != nil {
 		s.logger.Error(err, "error getting queue length")
 		return []external_metrics.ExternalMetricValue{}, false, err
 	}

-	queuelen := int64(*props.ApproximateMessagesCount)
+
 	metric := GenerateMetricInMili(metricName, float64(queuelen))
 	return []external_metrics.ExternalMetricValue{metric}, queuelen > s.metadata.activationTargetQueueLength, nil
 }
+
+func (s *azureQueueScaler) getMessageCount(ctx context.Context) (int64, error) {
+	strategy := strings.ToLower(s.metadata.queueLengthStrategy)
+	if strategy == QueueLengthStrategyVisibleOnly {
+		queue, err := s.queueClient.PeekMessages(ctx, &azqueue.PeekMessagesOptions{NumberOfMessages: &maxPeekMessages})
+		if err != nil {
+			return 0, err
+		}
+		visibleMessageCount := len(queue.Messages)
+
+		// Queue has fewer messages than we were allowed to peek for,
+		// so no need to fall back to the 'all' strategy
+		if visibleMessageCount < int(maxPeekMessages) {
+			return int64(visibleMessageCount), nil
+		}
+	}
+
+	props, err := s.queueClient.GetProperties(ctx, nil)
+	if err != nil {
+		return 0, err
+	}
+	return int64(*props.ApproximateMessagesCount), nil
+}
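Restated outside the scaler for clarity: with the visibleonly strategy the code peeks at up to 32 messages and trusts that count only when the peek window is not full; otherwise, and for the default all strategy, it falls back to the queue's approximate total. A hedged sketch of just that decision rule; the two function-typed parameters stand in for the azqueue client calls and are assumptions for illustration:

func messageCount(strategy string, peekVisible func(max int32) (int, error), approximateTotal func() (int64, error)) (int64, error) {
	const maxPeek int32 = 32
	if strings.EqualFold(strategy, "visibleonly") {
		visible, err := peekVisible(maxPeek)
		if err != nil {
			return 0, err
		}
		// The peek window was not filled, so the visible count is exact.
		if visible < int(maxPeek) {
			return int64(visible), nil
		}
		// Window full: the real count may be larger, so fall back to the total.
	}
	return approximateTotal()
}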
From b5865c661bb8a09c7f035e271dcd7de8826c2f98 Mon Sep 17 00:00:00 2001
From: June Han
Date: Tue, 30 Jul 2024 21:54:21 +0900
Subject: [PATCH 25/37] Add eagerScalingStrategy for ScaledJob (#5872)

Signed-off-by: June Han
Signed-off-by: Zbynek Roubalik
Co-authored-by: Zbynek Roubalik
Signed-off-by: novoselov
---
 CHANGELOG.md                            |  1 +
 pkg/scaling/executor/scale_jobs.go      | 28 ++--
 pkg/scaling/executor/scale_jobs_test.go | 51 +++++--
 .../eager_scaling_strategy_test.go      | 134 ++++++++++++++++++
 4 files changed, 193 insertions(+), 21 deletions(-)
 create mode 100644 tests/internals/scaling_strategies/eager_scaling_strategy_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 278234e0021..091ba0e5a98 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -75,6 +75,7 @@ Here is an overview of all new **experimental** features:

 ### Improvements

+- **General**: Added `eagerScalingStrategy` for `ScaledJob` ([#5114](https://github.com/kedacore/keda/issues/5114))
 - **Azure queue scaler**: Added new configuration option 'queueLengthStrategy' ([#4478](https://github.com/kedacore/keda/issues/4478))
 - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802))
 - **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when GCP metric returns no value. ([#5896](https://github.com/kedacore/keda/issues/5896))

diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go
index bb671afbdf1..3c1b629ce18 100644
--- a/pkg/scaling/executor/scale_jobs.go
+++ b/pkg/scaling/executor/scale_jobs.go
@@ -95,7 +95,7 @@ func (e *scaleExecutor) getScalingDecision(scaledJob *kedav1alpha1.ScaledJob, ru
 		scaleTo = scaleToMinReplica
 		effectiveMaxScale = scaleToMinReplica
 	} else {
-		effectiveMaxScale = NewScalingStrategy(logger, scaledJob).GetEffectiveMaxScale(maxScale, runningJobCount-minReplicaCount, pendingJobCount, scaledJob.MaxReplicaCount())
+		effectiveMaxScale, scaleTo = NewScalingStrategy(logger, scaledJob).GetEffectiveMaxScale(maxScale, runningJobCount-minReplicaCount, pendingJobCount, scaledJob.MaxReplicaCount(), scaleTo)
 	}
 	return effectiveMaxScale, scaleTo
 }
@@ -391,6 +391,9 @@ func NewScalingStrategy(logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) S
 	case "accurate":
 		logger.V(1).Info("Selecting Scale Strategy", "specified", scaledJob.Spec.ScalingStrategy.Strategy, "selected", "accurate")
 		return accurateScalingStrategy{}
+	case "eager":
+		logger.V(1).Info("Selecting Scale Strategy", "specified", scaledJob.Spec.ScalingStrategy.Strategy, "selected", "eager")
+		return eagerScalingStrategy{}
 	default:
 		logger.V(1).Info("Selecting Scale Strategy", "specified", scaledJob.Spec.ScalingStrategy.Strategy, "selected", "default")
 		return defaultScalingStrategy{}
@@ -399,14 +402,14 @@

 // ScalingStrategy is an interface for switching scaling algorithm
 type ScalingStrategy interface {
-	GetEffectiveMaxScale(maxScale, runningJobCount, pendingJobCount, maxReplicaCount int64) int64
+	GetEffectiveMaxScale(maxScale, runningJobCount, pendingJobCount, maxReplicaCount, scaleTo int64) (int64, int64)
 }

 type defaultScalingStrategy struct {
 }

-func (s defaultScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, _, _ int64) int64 {
-	return maxScale - runningJobCount
+func (s defaultScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, _, _, scaleTo int64) (int64, int64) {
+	return maxScale - runningJobCount, scaleTo
 }

 type customScalingStrategy struct {
@@ -414,18 +417,25 @@ type customScalingStrategy struct {
 	CustomScalingRunningJobPercentage *float64
 }

-func (s customScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, _, maxReplicaCount int64) int64 {
-	return min(maxScale-int64(*s.CustomScalingQueueLengthDeduction)-int64(float64(runningJobCount)*(*s.CustomScalingRunningJobPercentage)), maxReplicaCount)
+func (s customScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, _, maxReplicaCount, scaleTo int64) (int64, int64) {
+	return min(maxScale-int64(*s.CustomScalingQueueLengthDeduction)-int64(float64(runningJobCount)*(*s.CustomScalingRunningJobPercentage)), maxReplicaCount), scaleTo
 }

 type accurateScalingStrategy struct {
 }

-func (s accurateScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, pendingJobCount, maxReplicaCount int64) int64 {
+func (s accurateScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, pendingJobCount, maxReplicaCount, scaleTo int64) (int64, int64) {
 	if (maxScale + runningJobCount) > maxReplicaCount {
-		return maxReplicaCount - runningJobCount
+		return maxReplicaCount - runningJobCount, scaleTo
 	}
-	return maxScale - pendingJobCount
+	return maxScale - pendingJobCount, scaleTo
+}
+
+type eagerScalingStrategy struct {
+}
+
+func (s eagerScalingStrategy) GetEffectiveMaxScale(maxScale, runningJobCount, pendingJobCount, maxReplicaCount, _ int64) (int64, int64) {
+	return min(maxReplicaCount-runningJobCount-pendingJobCount, maxScale), maxReplicaCount
 }

 func min(x, y int64) int64 {
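Before the test updates below, the eager rule is worth restating on its own: it admits as much of the queued demand (maxScale) as capacity allows, never lets running plus pending jobs exceed maxReplicaCount, and always reports maxReplicaCount as the scale-to target. A self-contained sketch that mirrors the diff above; it is standalone for illustration, not an import of the executor package:

func eagerEffectiveMaxScale(maxScale, running, pending, maxReplicas int64) (int64, int64) {
	effective := maxReplicas - running - pending
	if maxScale < effective {
		effective = maxScale
	}
	// scaleTo is pinned to maxReplicaCount so the executor keeps filling
	// capacity while any demand remains.
	return effective, maxReplicas
}

For example, eagerEffectiveMaxScale(4, 7, 0, 10) yields (3, 10): only three replica slots remain, and the target stays at the maximum.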
diff --git a/pkg/scaling/executor/scale_jobs_test.go b/pkg/scaling/executor/scale_jobs_test.go index 7542c1acb58..c3f625a016b 100644 --- a/pkg/scaling/executor/scale_jobs_test.go +++ b/pkg/scaling/executor/scale_jobs_test.go @@ -81,13 +81,17 @@ func TestNewNewScalingStrategy(t *testing.T) { assert.Equal(t, "executor.defaultScalingStrategy", fmt.Sprintf("%T", strategy)) } +func maxScaleValue(maxValue, _ int64) int64 { + return maxValue +} + func TestDefaultScalingStrategy(t *testing.T) { logger := logf.Log.WithName("ScaledJobTest") strategy := NewScalingStrategy(logger, getMockScaledJobWithDefaultStrategy("default")) // maxScale doesn't exceed MaxReplicaCount. You can ignore it in this scenario // pendingJobCount isn't relevant in this scenario - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(3, 2, 0, 5)) - assert.Equal(t, int64(2), strategy.GetEffectiveMaxScale(2, 0, 0, 5)) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 5, 1))) + assert.Equal(t, int64(2), maxScaleValue(strategy.GetEffectiveMaxScale(2, 0, 0, 5, 1))) } func TestCustomScalingStrategy(t *testing.T) { @@ -97,13 +101,13 @@ func TestCustomScalingStrategy(t *testing.T) { strategy := NewScalingStrategy(logger, getMockScaledJobWithStrategy("custom", "custom", customScalingQueueLengthDeduction, customScalingRunningJobPercentage)) // maxScale doesn't exceed MaxReplicaCount. You can ignore it in this scenario // pendingJobCount isn't relevant in this scenario - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(3, 2, 0, 5)) - assert.Equal(t, int64(9), strategy.GetEffectiveMaxScale(10, 0, 0, 10)) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 5, 1))) + assert.Equal(t, int64(9), maxScaleValue(strategy.GetEffectiveMaxScale(10, 0, 0, 10, 1))) strategy = NewScalingStrategy(logger, getMockScaledJobWithCustomStrategyWithNilParameter("custom", "custom")) // If you don't set the two parameters, the behavior is the same as DefaultStrategy - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(3, 2, 0, 5)) - assert.Equal(t, int64(2), strategy.GetEffectiveMaxScale(2, 0, 0, 5)) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 5, 1))) + assert.Equal(t, int64(2), maxScaleValue(strategy.GetEffectiveMaxScale(2, 0, 0, 5, 1))) // An empty string selects DefaultStrategy customScalingQueueLengthDeduction = int32(1) @@ -115,25 +119,48 @@ func TestCustomScalingStrategy(t *testing.T) { customScalingQueueLengthDeduction = int32(2) customScalingRunningJobPercentage = "0" strategy = NewScalingStrategy(logger, getMockScaledJobWithStrategy("custom", "custom", customScalingQueueLengthDeduction, customScalingRunningJobPercentage)) - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(3, 2, 0, 5)) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 5, 1))) // Exceed the MaxReplicaCount customScalingQueueLengthDeduction = int32(-2) customScalingRunningJobPercentage = "0" strategy = NewScalingStrategy(logger, getMockScaledJobWithStrategy("custom", "custom", customScalingQueueLengthDeduction, customScalingRunningJobPercentage)) - assert.Equal(t, int64(4), strategy.GetEffectiveMaxScale(3, 2, 0, 4)) + assert.Equal(t, int64(4), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 4, 1))) } func TestAccurateScalingStrategy(t *testing.T) { logger := logf.Log.WithName("ScaledJobTest") strategy := NewScalingStrategy(logger, getMockScaledJobWithStrategy("accurate", "accurate", 0, "0")) // maxScale doesn't exceed MaxReplicaCount.
You can ignore it in this scenario - assert.Equal(t, int64(3), strategy.GetEffectiveMaxScale(3, 2, 0, 5)) - assert.Equal(t, int64(3), strategy.GetEffectiveMaxScale(5, 2, 0, 5)) + assert.Equal(t, int64(3), maxScaleValue(strategy.GetEffectiveMaxScale(3, 2, 0, 5, 1))) + assert.Equal(t, int64(3), maxScaleValue(strategy.GetEffectiveMaxScale(5, 2, 0, 5, 1))) // Test with 2 pending jobs - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(3, 4, 2, 10)) - assert.Equal(t, int64(1), strategy.GetEffectiveMaxScale(5, 4, 2, 5)) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(3, 4, 2, 10, 1))) + assert.Equal(t, int64(1), maxScaleValue(strategy.GetEffectiveMaxScale(5, 4, 2, 5, 1))) +} + +func TestEagerScalingStrategy(t *testing.T) { + logger := logf.Log.WithName("ScaledJobTest") + strategy := NewScalingStrategy(logger, getMockScaledJobWithStrategy("eager", "eager", 0, "0")) + + maxScale, scaleTo := strategy.GetEffectiveMaxScale(4, 3, 0, 10, 1) + assert.Equal(t, int64(4), maxScale) + assert.Equal(t, int64(10), scaleTo) + maxScale, scaleTo = strategy.GetEffectiveMaxScale(4, 0, 3, 10, 1) + assert.Equal(t, int64(4), maxScale) + assert.Equal(t, int64(10), scaleTo) + + maxScale, scaleTo = strategy.GetEffectiveMaxScale(4, 7, 0, 10, 1) + assert.Equal(t, int64(3), maxScale) + assert.Equal(t, int64(10), scaleTo) + maxScale, scaleTo = strategy.GetEffectiveMaxScale(4, 1, 6, 10, 1) + assert.Equal(t, int64(3), maxScale) + assert.Equal(t, int64(10), scaleTo) + + maxScale, scaleTo = strategy.GetEffectiveMaxScale(15, 0, 0, 10, 1) + assert.Equal(t, int64(10), maxScale) + assert.Equal(t, int64(10), scaleTo) } func TestCleanUpMixedCaseWithSortByTime(t *testing.T) { diff --git a/tests/internals/scaling_strategies/eager_scaling_strategy_test.go b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go new file mode 100644 index 00000000000..e05c84c30a0 --- /dev/null +++ b/tests/internals/scaling_strategies/eager_scaling_strategy_test.go @@ -0,0 +1,134 @@ +//go:build e2e +// +build e2e + +package eager_scaling_strategy_test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" // For helper methods + .
"github.com/kedacore/keda/v2/tests/scalers/rabbitmq" +) + +var _ = godotenv.Load("../../.env") // For loading env variables from .env + +const ( + testName = "eager-scaling-strategy-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + rmqNamespace = fmt.Sprintf("%s-rmq", testName) + scaledJobName = fmt.Sprintf("%s-sj", testName) + queueName = "hello" + user = fmt.Sprintf("%s-user", testName) + password = fmt.Sprintf("%s-password", testName) + vhost = "/" + connectionString = fmt.Sprintf("amqp://%s:%s@rabbitmq.%s.svc.cluster.local/", user, password, rmqNamespace) + httpConnectionString = fmt.Sprintf("http://%s:%s@rabbitmq.%s.svc.cluster.local/", user, password, rmqNamespace) + secretName = fmt.Sprintf("%s-secret", testName) +) + +// YAML templates for your Kubernetes resources +const ( + scaledJobTemplate = ` +apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + RabbitApiHost: {{.Base64Connection}} +--- +apiVersion: keda.sh/v1alpha1 +kind: ScaledJob +metadata: + name: {{.ScaledJobName}} + namespace: {{.TestNamespace}} + labels: + app: {{.ScaledJobName}} +spec: + jobTargetRef: + template: + spec: + containers: + - name: sleeper + image: busybox + command: + - sleep + - "300" + imagePullPolicy: IfNotPresent + envFrom: + - secretRef: + name: {{.SecretName}} + restartPolicy: Never + backoffLimit: 1 + pollingInterval: 5 + maxReplicaCount: 10 + scalingStrategy: + strategy: "eager" + triggers: + - type: rabbitmq + metadata: + queueName: {{.QueueName}} + hostFromEnv: RabbitApiHost + mode: QueueLength + value: '1' +` +) + +type templateData struct { + ScaledJobName string + TestNamespace string + QueueName string + SecretName string + Base64Connection string +} + +func TestScalingStrategy(t *testing.T) { + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + RMQUninstall(t, rmqNamespace, user, password, vhost, WithoutOAuth()) + }) + + RMQInstall(t, kc, rmqNamespace, user, password, vhost, WithoutOAuth()) + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + testEagerScaling(t, kc) +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + // Populate fields required in YAML templates + ScaledJobName: scaledJobName, + TestNamespace: testNamespace, + QueueName: queueName, + Base64Connection: base64.StdEncoding.EncodeToString([]byte(httpConnectionString)), + SecretName: secretName, + }, []Template{ + {Name: "scaledJobTemplate", Config: scaledJobTemplate}, + } +} + +func testEagerScaling(t *testing.T, kc *kubernetes.Clientset) { + iterationCount := 20 + RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4) + assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 4, iterationCount, 1), + "job count should be %d after %d iterations", 4, iterationCount) + + RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4) + assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 8, iterationCount, 1), + "job count should be %d after %d iterations", 8, iterationCount) + + RMQPublishMessages(t, rmqNamespace, connectionString, queueName, 4) + assert.True(t, WaitForScaledJobCount(t, kc, scaledJobName, testNamespace, 10, iterationCount, 1), + "job count should be %d after %d iterations", 10, iterationCount) +} From d69b00fb90ec90830f655c3f1546b2c5cfba6c92 Mon Sep 17 00:00:00 2001 From: Josef Karasek Date: Tue, 30 Jul 2024 15:13:34 +0200 Subject: [PATCH 
26/37] [BUG-5656] Annotate Jobs with parent ScaledJob generation (#5876) * Annotate Jobs with parent ScaledJob generation Signed-off-by: Josef Karasek * fix tests Signed-off-by: Josef Karasek * fix lint Signed-off-by: Josef Karasek * fix log message Signed-off-by: Josef Karasek * update changelog Signed-off-by: Josef Karasek * update changelog Signed-off-by: Josef Karasek * update changelog Signed-off-by: Josef Karasek --------- Signed-off-by: Josef Karasek Signed-off-by: Zbynek Roubalik Co-authored-by: Zbynek Roubalik Signed-off-by: novoselov --- CHANGELOG.md | 2 +- controllers/keda/scaledjob_controller.go | 38 ++++++++++++++++-------- pkg/scaling/executor/scale_jobs.go | 13 +++++++- pkg/scaling/executor/scale_jobs_test.go | 7 +++-- 4 files changed, 44 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 091ba0e5a98..820da55d63b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,8 +74,8 @@ Here is an overview of all new **experimental** features: - **Datadog Scaler**: Add support to use the Cluster Agent as source of metrics ([#5355](https://github.com/kedacore/keda/issues/5355)) ### Improvements - - **General**: Added `eagerScalingStrategy` for `ScaledJob` ([#5114](https://github.com/kedacore/keda/issues/5114)) +- **General**: Added `eagerScalingStrategy` for `ScaledJob` ([#5114](https://github.com/kedacore/keda/issues/5114)) +- **General**: Do not delete running Jobs on KEDA restart ([#5656](https://github.com/kedacore/keda/issues/5656)) - **Azure queue scaler**: Added new configuration option 'queueLengthStrategy' ([#4478](https://github.com/kedacore/keda/issues/4478)) - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802)) - **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when GCP metric returns no value. ([#5896](https://github.com/kedacore/keda/issues/5896)) diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 98c1ce87cc8..845ba5aca90 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -279,22 +279,36 @@ func (r *ScaledJobReconciler) deletePreviousVersionScaleJobs(ctx context.Context return "Cannot get list of Jobs owned by this scaledJob", err } - if len(jobs.Items) > 0 { - logger.Info("RolloutStrategy: immediate, Deleting jobs owned by the previous version of the scaledJob", "numJobsToDelete", len(jobs.Items)) + jobIndexes := make([]int, 0, len(jobs.Items)) + scaledJobGeneration := strconv.FormatInt(scaledJob.Generation, 10) + for i, job := range jobs.Items { + if jobGen, ok := job.Annotations["scaledjob.keda.sh/generation"]; !ok { + // delete Jobs that don't have the generation annotation + jobIndexes = append(jobIndexes, i) + } else if jobGen != scaledJobGeneration { + // delete Jobs that have a different generation annotation + jobIndexes = append(jobIndexes, i) + } } - for _, job := range jobs.Items { - job := job - propagationPolicy := metav1.DeletePropagationBackground - if scaledJob.Spec.Rollout.PropagationPolicy == "foreground" { - propagationPolicy = metav1.DeletePropagationForeground - } - err = r.Client.Delete(ctx, &job, client.PropagationPolicy(propagationPolicy)) - if err != nil { - return "Not able to delete job: " + job.Name, err + if len(jobIndexes) == 0 { + logger.Info("RolloutStrategy: immediate, No jobs owned by the previous version of the scaledJob") + } else { + logger.Info("RolloutStrategy: immediate, Deleting jobs owned by the previous version of the scaledJob", "numJobsToDelete", len(jobIndexes)) + for _, index := range jobIndexes { + job := jobs.Items[index] + + propagationPolicy := metav1.DeletePropagationBackground + if scaledJob.Spec.Rollout.PropagationPolicy == "foreground" { + propagationPolicy = metav1.DeletePropagationForeground + } + err = r.Client.Delete(ctx, &job, client.PropagationPolicy(propagationPolicy)) + if err != nil { + return "Not able to delete job: " + job.Name, err + } } + return fmt.Sprintf("RolloutStrategy: immediate, deleted jobs owned by the previous version of the scaleJob: %d jobs deleted", len(jobIndexes)), nil } - return fmt.Sprintf("RolloutStrategy: immediate, deleted jobs owned by the previous version of the scaleJob: %d jobs deleted", len(jobs.Items)), nil } return fmt.Sprintf("RolloutStrategy: %s", scaledJob.Spec.RolloutStrategy), nil }
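Before the executor changes below, it may help to see the controller's new bookkeeping in isolation. This is a minimal sketch, not the patch itself: jobsToDelete is a hypothetical helper, and a plain struct stands in for batchv1.Job; only the annotation comparison mirrors the reconciler loop above.

package main

import (
	"fmt"
	"strconv"
)

// job stands in for batchv1.Job; only its annotations matter here.
type job struct {
	name        string
	annotations map[string]string
}

// jobsToDelete mirrors the reconciler logic: a Job survives a restart only if
// it carries the parent ScaledJob's current generation annotation.
func jobsToDelete(jobs []job, generation int64) []string {
	gen := strconv.FormatInt(generation, 10)
	var stale []string
	for _, j := range jobs {
		if jobGen, ok := j.annotations["scaledjob.keda.sh/generation"]; !ok || jobGen != gen {
			stale = append(stale, j.name) // annotation missing or from an older generation
		}
	}
	return stale
}

func main() {
	jobs := []job{
		{name: "pre-upgrade", annotations: nil},
		{name: "old-gen", annotations: map[string]string{"scaledjob.keda.sh/generation": "1"}},
		{name: "current", annotations: map[string]string{"scaledjob.keda.sh/generation": "2"}},
	}
	fmt.Println(jobsToDelete(jobs, 2)) // [pre-upgrade old-gen]
}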
diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go index 3c1b629ce18..2de69ffce77 100644 --- a/pkg/scaling/executor/scale_jobs.go +++ b/pkg/scaling/executor/scale_jobs.go @@ -101,6 +101,10 @@ func (e *scaleExecutor) getScalingDecision(scaledJob *kedav1alpha1.ScaledJob, ru } func (e *scaleExecutor) createJobs(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob, scaleTo int64, maxScale int64) { + if maxScale <= 0 { + logger.Info("No need to create jobs - all requested jobs already exist", "jobs", maxScale) + return + } logger.Info("Creating jobs", "Effective number of max jobs", maxScale) if scaleTo > maxScale { scaleTo = maxScale @@ -137,6 +141,13 @@ func (e *scaleExecutor) generateJobs(logger logr.Logger, scaledJob *kedav1alpha1 labels[key] = value } + annotations := map[string]string{ + "scaledjob.keda.sh/generation": strconv.FormatInt(scaledJob.Generation, 10), + } + for key, value := range scaledJob.ObjectMeta.Annotations { + annotations[key] = value + } + jobs := make([]*batchv1.Job, int(scaleTo)) for i := 0; i < int(scaleTo); i++ { job := &batchv1.Job{ @@ -144,7 +155,7 @@ func (e *scaleExecutor) generateJobs(logger logr.Logger, scaledJob *kedav1alpha1 GenerateName: scaledJob.GetName() + "-", Namespace: scaledJob.GetNamespace(), Labels: labels, - Annotations: scaledJob.ObjectMeta.Annotations, + Annotations: annotations, }, Spec: *scaledJob.Spec.JobTargetRef.DeepCopy(), } diff --git a/pkg/scaling/executor/scale_jobs_test.go b/pkg/scaling/executor/scale_jobs_test.go index c3f625a016b..9652abdc653 100644 --- a/pkg/scaling/executor/scale_jobs_test.go +++ b/pkg/scaling/executor/scale_jobs_test.go @@ -343,8 +343,11 @@ func TestCreateJobs(t *testing.T) { func TestGenerateJobs(t *testing.T) { var ( - expectedAnnotations = map[string]string{"test": "test"} - expectedLabels = map[string]string{ + expectedAnnotations = map[string]string{ + "test": "test", + "scaledjob.keda.sh/generation": "0", + } + expectedLabels = map[string]string{ "app.kubernetes.io/managed-by": "keda-operator", "app.kubernetes.io/name": "test", "app.kubernetes.io/part-of": "test", From 26e9672032879353297feb9d22572249ca3145a1 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Ferrero Date: Tue, 30 Jul 2024 15:36:45 +0200 Subject: [PATCH 27/37] chore: Update e2e image (#6000) Signed-off-by: Jorge Turrado Signed-off-by: novoselov --- .github/workflows/pr-e2e.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 3f5d44c5e63..649d4e7cde9 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -68,7 +68,7 @@ jobs: needs: triage runs-on: ubuntu-latest name: Build images - container: ghcr.io/kedacore/keda-tools:1.21.9 + container:
ghcr.io/kedacore/keda-tools:1.22.5 if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress @@ -148,7 +148,7 @@ jobs: needs: [triage, build-test-images] runs-on: e2e name: Execute e2e tests - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress From f5b549fcbfd1477fa9a895e14c20c078208a8bd5 Mon Sep 17 00:00:00 2001 From: Josef Karasek Date: Tue, 30 Jul 2024 16:33:38 +0200 Subject: [PATCH 28/37] [BUG-5922] Report failing ScaledJob triggers in status (#5916) Signed-off-by: Josef Karasek Signed-off-by: novoselov --- CHANGELOG.md | 1 + .../mock_executor/mock_interface.go | 8 +++--- pkg/scaling/executor/scale_executor.go | 2 +- pkg/scaling/executor/scale_jobs.go | 15 ++++++++++- pkg/scaling/scale_handler.go | 20 ++++++++------- pkg/scaling/scale_handler_test.go | 25 +++++++++++++------ 6 files changed, 48 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 820da55d63b..3014dc32694 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ Here is an overview of all new **experimental** features: - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806)) - **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760)) - **New Relic Scaler**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5944](https://github.com/kedacore/keda/issues/5944)) +- **ScaledJob**: Fix ScaledJob ignores failing trigger(s) error ([#5922](https://github.com/kedacore/keda/issues/5922)) ### Deprecations diff --git a/pkg/mock/mock_scaling/mock_executor/mock_interface.go b/pkg/mock/mock_scaling/mock_executor/mock_interface.go index 53836a597fb..eef9c5fc4bd 100644 --- a/pkg/mock/mock_scaling/mock_executor/mock_interface.go +++ b/pkg/mock/mock_scaling/mock_executor/mock_interface.go @@ -42,15 +42,15 @@ func (m *MockScaleExecutor) EXPECT() *MockScaleExecutorMockRecorder { } // RequestJobScale mocks base method. -func (m *MockScaleExecutor) RequestJobScale(ctx context.Context, scaledJob *v1alpha1.ScaledJob, isActive bool, scaleTo, maxScale int64) { +func (m *MockScaleExecutor) RequestJobScale(ctx context.Context, scaledJob *v1alpha1.ScaledJob, isActive, isError bool, scaleTo, maxScale int64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RequestJobScale", ctx, scaledJob, isActive, scaleTo, maxScale) + m.ctrl.Call(m, "RequestJobScale", ctx, scaledJob, isActive, isError, scaleTo, maxScale) } // RequestJobScale indicates an expected call of RequestJobScale. -func (mr *MockScaleExecutorMockRecorder) RequestJobScale(ctx, scaledJob, isActive, scaleTo, maxScale any) *gomock.Call { +func (mr *MockScaleExecutorMockRecorder) RequestJobScale(ctx, scaledJob, isActive, isError, scaleTo, maxScale any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestJobScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestJobScale), ctx, scaledJob, isActive, scaleTo, maxScale) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestJobScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestJobScale), ctx, scaledJob, isActive, isError, scaleTo, maxScale) } // RequestScale mocks base method. 
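The regenerated mock reflects a signature change that runs through the whole ScaledJob path in the diffs that follow: the scale handler now reports whether any trigger errored, and the executor surfaces that on the ScaledJob's Ready condition instead of silently ignoring it. A rough, self-contained sketch of that flow; the condensed types here are hypothetical stand-ins, not the real KEDA interfaces.

package main

import "fmt"

// jobCheck condenses what isScaledJobActive returns after this patch.
type jobCheck struct {
	isActive bool
	isError  bool // true if at least one trigger failed to report metrics
	scaleTo  int64
	maxScale int64
}

// requestJobScale sketches the executor side: the scaling decision itself is
// unchanged, but a trigger error now flips the Ready condition to Unknown
// (reason "PartialTriggerError" in the patch).
func requestJobScale(c jobCheck) string {
	if c.isError {
		return "Ready=Unknown (PartialTriggerError)"
	}
	return "Ready=True"
}

func main() {
	fmt.Println(requestJobScale(jobCheck{isActive: true, scaleTo: 2, maxScale: 5}))
	fmt.Println(requestJobScale(jobCheck{isActive: true, isError: true, scaleTo: 2, maxScale: 5}))
}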
diff --git a/pkg/scaling/executor/scale_executor.go b/pkg/scaling/executor/scale_executor.go index ee5f15aa171..b28061a495e 100644 --- a/pkg/scaling/executor/scale_executor.go +++ b/pkg/scaling/executor/scale_executor.go @@ -39,7 +39,7 @@ const ( // ScaleExecutor contains methods RequestJobScale and RequestScale type ScaleExecutor interface { - RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) + RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, isError bool, scaleTo int64, maxScale int64) RequestScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, isActive bool, isError bool, options *ScaleExecutorOptions) } diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go index 2de69ffce77..b32eb344f07 100644 --- a/pkg/scaling/executor/scale_jobs.go +++ b/pkg/scaling/executor/scale_jobs.go @@ -38,7 +38,7 @@ const ( defaultFailedJobsHistoryLimit = int32(100) ) -func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) { +func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive, isError bool, scaleTo int64, maxScale int64) { logger := e.logger.WithValues("scaledJob.Name", scaledJob.Name, "scaledJob.Namespace", scaledJob.Namespace) runningJobCount := e.getRunningJobCount(ctx, scaledJob) @@ -65,6 +65,19 @@ func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1al logger.V(1).Info("No change in activity") } + if isError { + // some triggers responded with error + // Set ScaledJob.Status.ReadyCondition to Unknown + readyCondition := scaledJob.Status.Conditions.GetReadyCondition() + msg := "Some triggers defined in ScaledJob are not working correctly" + logger.V(1).Info(msg) + if !readyCondition.IsUnknown() { + if err := e.setReadyCondition(ctx, logger, scaledJob, metav1.ConditionUnknown, "PartialTriggerError", msg); err != nil { + logger.Error(err, "error setting ready condition") + } + } + } + condition := scaledJob.Status.Conditions.GetActiveCondition() if condition.IsUnknown() || condition.IsTrue() != isActive { if isActive { diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 007b43e4501..5a955e48e66 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -259,8 +259,8 @@ func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interfac return } - isActive, scaleTo, maxScale := h.isScaledJobActive(ctx, obj) - h.scaleExecutor.RequestJobScale(ctx, obj, isActive, scaleTo, maxScale) + isActive, isError, scaleTo, maxScale := h.isScaledJobActive(ctx, obj) + h.scaleExecutor.RequestJobScale(ctx, obj, isActive, isError, scaleTo, maxScale) } } @@ -813,15 +813,16 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, // getScaledJobMetrics returns metrics for specified metric name for a ScaledJob identified by its name and namespace. // It could either query the metric value directly from the scaler or from a cache, that's being stored for the scaler. 
-func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) []scaledjob.ScalerMetrics { +func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) ([]scaledjob.ScalerMetrics, bool) { logger := log.WithValues("scaledJob.Namespace", scaledJob.Namespace, "scaledJob.Name", scaledJob.Name) cache, err := h.GetScalersCache(ctx, scaledJob) metricscollector.RecordScaledJobError(scaledJob.Namespace, scaledJob.Name, err) if err != nil { log.Error(err, "error getting scalers cache", "scaledJob.Namespace", scaledJob.Namespace, "scaledJob.Name", scaledJob.Name) - return nil + return nil, true } + var isError bool var scalersMetrics []scaledjob.ScalerMetrics scalers, scalerConfigs := cache.GetScalers() for scalerIndex, scaler := range scalers { @@ -849,8 +850,9 @@ func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav metricscollector.RecordScalerLatency(scaledJob.Namespace, scaledJob.Name, scalerName, scalerIndex, metricName, false, latency) } if err != nil { - scalerLogger.V(1).Info("Error getting scaler metrics and activity, but continue", "error", err) + scalerLogger.Error(err, "Error getting scaler metrics and activity, but continue") cache.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) + isError = true continue } if isTriggerActive { @@ -883,21 +885,21 @@ func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav metricscollector.RecordScalerActive(scaledJob.Namespace, scaledJob.Name, scalerName, scalerIndex, metricName, false, isTriggerActive) } } - return scalersMetrics + return scalersMetrics, isError } // isScaledJobActive returns whether the input ScaledJob: // is active as the first return value, // the second and the third return values indicate queueLength and maxValue for scale -func (h *scaleHandler) isScaledJobActive(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) (bool, int64, int64) { +func (h *scaleHandler) isScaledJobActive(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) (bool, bool, int64, int64) { logger := logf.Log.WithName("scalemetrics") - scalersMetrics := h.getScaledJobMetrics(ctx, scaledJob) + scalersMetrics, isError := h.getScaledJobMetrics(ctx, scaledJob) isActive, queueLength, maxValue, maxFloatValue := scaledjob.IsScaledJobActive(scalersMetrics, scaledJob.Spec.ScalingStrategy.MultipleScalersCalculation, scaledJob.MinReplicaCount(), scaledJob.MaxReplicaCount()) logger.V(1).WithValues("scaledJob.Name", scaledJob.Name).Info("Checking if ScaleJob Scalers are active", "isActive", isActive, "maxValue", maxFloatValue, "MultipleScalersCalculation", scaledJob.Spec.ScalingStrategy.MultipleScalersCalculation) - return isActive, queueLength, maxValue + return isActive, isError, queueLength, maxValue } // getTrueMetricArray is a help function made for composite scaler to determine diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go index 7a822c163a8..23180308841 100644 --- a/pkg/scaling/scale_handler_test.go +++ b/pkg/scaling/scale_handler_test.go @@ -661,19 +661,21 @@ func TestIsScaledJobActive(t *testing.T) { scalerCachesLock: &sync.RWMutex{}, scaledObjectsMetricCache: metricscache.NewMetricsCache(), } - isActive, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) assert.Equal(t, true, isActive) + 
assert.Equal(t, false, isError) assert.Equal(t, int64(20), queueLength) assert.Equal(t, int64(10), maxValue) scalerCache.Close(context.Background()) // Test the validation scalerTestDatam := []scalerTestData{ - newScalerTestData("s0-queueLength", 100, "max", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 20, 20), - newScalerTestData("queueLength", 100, "min", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 5, 2), - newScalerTestData("messageCount", 100, "avg", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 12, 9), - newScalerTestData("s3-messageCount", 100, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 35, 27), - newScalerTestData("s10-messageCount", 25, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 35, 25), + newScalerTestData("s0-queueLength", 100, "max", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 20, 20), + newScalerTestData("queueLength", 100, "min", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 5, 2), + newScalerTestData("messageCount", 100, "avg", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 12, 9), + newScalerTestData("s3-messageCount", 100, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 35, 27), + newScalerTestData("s10-messageCount", 25, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 35, 25), } for index, scalerTestData := range scalerTestDatam { @@ -717,9 +719,11 @@ func TestIsScaledJobActive(t *testing.T) { scaledObjectsMetricCache: metricscache.NewMetricsCache(), } fmt.Printf("index: %d", index) - isActive, queueLength, maxValue = sh.isScaledJobActive(context.TODO(), scaledJob) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue = sh.isScaledJobActive(context.TODO(), scaledJob) // assert.Equal(t, 5, index) assert.Equal(t, scalerTestData.ResultIsActive, isActive) + assert.Equal(t, scalerTestData.ResultIsError, isError) assert.Equal(t, scalerTestData.ResultQueueLength, queueLength) assert.Equal(t, scalerTestData.ResultMaxValue, maxValue) scalerCache.Close(context.Background()) @@ -757,8 +761,10 @@ func TestIsScaledJobActiveIfQueueEmptyButMinReplicaCountGreaterZero(t *testing.T scaledObjectsMetricCache: metricscache.NewMetricsCache(), } - isActive, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) assert.Equal(t, true, isActive) + assert.Equal(t, false, isError) assert.Equal(t, int64(0), queueLength) assert.Equal(t, int64(0), maxValue) scalerCache.Close(context.Background()) @@ -781,6 +787,7 @@ func newScalerTestData( scaler4AverageValue int, //nolint:golint,unparam scaler4IsActive bool, //nolint:golint,unparam resultIsActive bool, //nolint:golint,unparam + resultIsError bool, //nolint:golint,unparam resultQueueLength, resultMaxLength int) scalerTestData { return scalerTestData{ @@ -800,6 +807,7 @@ func newScalerTestData( Scaler4AverageValue: int64(scaler4AverageValue), Scaler4IsActive: scaler4IsActive, ResultIsActive: resultIsActive, + ResultIsError: resultIsError, ResultQueueLength: int64(resultQueueLength), ResultMaxValue: int64(resultMaxLength), } @@ -822,6 +830,7 @@ type scalerTestData struct { Scaler4AverageValue int64 Scaler4IsActive bool ResultIsActive bool + ResultIsError bool ResultQueueLength int64 ResultMaxValue int64 MinReplicaCount int32 From f886fcace65f315b5a1a0db1d65b5b61561d98cb Mon Sep 17 00:00:00 2001 From: Paul Yu
Date: Tue, 30 Jul 2024 07:59:20 -0700 Subject: [PATCH 29/37] chore: build with keda-tools:1.22.5 (#5971) * chore: build with keda-tools:1.22.5 to resolve CVE-2024-24790, CVE-2024-24789, and CVE-2024-24791 bump github.com/Azure/azure-sdk-for-go/sdk/azidentity to resolve CVE-2024-35255 Signed-off-by: Paul Yu * chore: use go install instead of go get and replacing deprecated tools Signed-off-by: Paul Yu * chore: vendor dependency cleanup Signed-off-by: Paul Yu * Update missing references to 1.21 Signed-off-by: Jorge Turrado --------- Signed-off-by: Paul Yu Signed-off-by: Jorge Turrado Co-authored-by: Jorge Turrado Ferrero Signed-off-by: novoselov --- .devcontainer/Dockerfile | 52 +-- .github/workflows/fossa.yml | 2 +- .github/workflows/main-build.yml | 4 +- .github/workflows/pr-validation.yml | 14 +- .github/workflows/release-build.yml | 4 +- .github/workflows/static-analysis-codeql.yml | 2 +- .github/workflows/template-main-e2e-test.yml | 2 +- .github/workflows/template-smoke-tests.yml | 2 +- CHANGELOG.md | 4 + Dockerfile | 2 +- Dockerfile.adapter | 2 +- Dockerfile.webhooks | 2 +- go.mod | 24 +- go.sum | 20 +- .../sdk/azidentity/.gitignore | 4 + .../sdk/azidentity/CHANGELOG.md | 78 +++- .../sdk/azidentity/MIGRATION.md | 4 +- .../azure-sdk-for-go/sdk/azidentity/README.md | 23 +- .../sdk/azidentity/TOKEN_CACHING.MD | 3 +- .../sdk/azidentity/TROUBLESHOOTING.md | 42 +- .../sdk/azidentity/assets.json | 2 +- .../sdk/azidentity/azure_cli_credential.go | 33 +- .../azidentity/azure_pipelines_credential.go | 140 ++++++ .../azidentity/chained_token_credential.go | 2 +- .../azure-sdk-for-go/sdk/azidentity/ci.yml | 39 +- .../azidentity/client_assertion_credential.go | 2 +- .../client_certificate_credential.go | 9 +- .../sdk/azidentity/confidential_client.go | 6 +- .../azidentity/default_azure_credential.go | 49 +-- .../azidentity/developer_credential_util.go | 2 +- .../sdk/azidentity/device_code_credential.go | 4 +- .../sdk/azidentity/environment_credential.go | 5 +- .../azure-sdk-for-go/sdk/azidentity/errors.go | 48 ++- .../sdk/azidentity/go.work.sum | 19 + .../interactive_browser_credential.go | 4 +- .../azidentity/managed-identity-matrix.json | 17 + .../sdk/azidentity/managed_identity_client.go | 111 +++-- .../azidentity/managed_identity_credential.go | 9 +- .../sdk/azidentity/on_behalf_of_credential.go | 16 +- .../sdk/azidentity/public_client.go | 2 +- .../sdk/azidentity/test-resources-post.ps1 | 112 +++++ .../sdk/azidentity/test-resources-pre.ps1 | 20 +- .../sdk/azidentity/test-resources.bicep | 220 +++++++++- .../sdk/azidentity/version.go | 2 +- .../messaging/azeventgrid/internal/version.go | 2 +- .../messaging/azeventgrid/publisher/README.md | 2 - .../sdk/messaging/azservicebus/CHANGELOG.md | 6 - .../azservicebus/internal/conn/conn.go | 7 +- .../azservicebus/internal/constants.go | 2 +- .../testdata/client_intermediate_cert.der | Bin 998 -> 0 bytes .../testdata/client_leaf_cert.der | Bin 1147 -> 0 bytes .../testdata/client_root_cert.der | Bin 1013 -> 0 bytes .../testdata/server_intermediate_cert.der | Bin 998 -> 0 bytes .../testdata/server_leaf_cert.der | Bin 1147 -> 0 bytes .../testdata/server_root_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/client_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/client_cert.pem | 24 -- .../v2/remotesigner/testdata/client_key.pem | 27 -- .../v2/remotesigner/testdata/server_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/server_cert.pem | 24 -- .../v2/remotesigner/testdata/server_key.pem | 27 -- 
.../internal/v2/testdata/client_cert.pem | 24 -- .../internal/v2/testdata/client_key.pem | 27 -- .../internal/v2/testdata/server_cert.pem | 24 -- .../internal/v2/testdata/server_key.pem | 27 -- .../tlsconfigstore/testdata/client_cert.pem | 24 -- .../v2/tlsconfigstore/testdata/client_key.pem | 27 -- .../tlsconfigstore/testdata/server_cert.pem | 24 -- .../v2/tlsconfigstore/testdata/server_key.pem | 27 -- .../google/s2a-go/testdata/client_cert.pem | 24 -- .../google/s2a-go/testdata/client_key.pem | 27 -- .../s2a-go/testdata/mds_client_cert.pem | 19 - .../google/s2a-go/testdata/mds_client_key.pem | 28 -- .../google/s2a-go/testdata/mds_root_cert.pem | 21 - .../s2a-go/testdata/mds_server_cert.pem | 21 - .../google/s2a-go/testdata/mds_server_key.pem | 28 -- .../s2a-go/testdata/self_signed_cert.pem | 19 - .../s2a-go/testdata/self_signed_key.pem | 28 -- .../google/s2a-go/testdata/server_cert.pem | 24 -- .../google/s2a-go/testdata/server_key.pem | 27 -- .../valyala/fasthttp/examples/README.md | 4 - .../valyala/fasthttp/examples/client/Makefile | 6 - .../fasthttp/examples/client/README.md | 21 - .../fasthttp/examples/client/client.go | 132 ------ .../fasthttp/examples/fileserver/Makefile | 7 - .../fasthttp/examples/fileserver/README.md | 84 ---- .../examples/fileserver/fileserver.go | 120 ------ .../examples/fileserver/ssl-cert-snakeoil.pem | 17 - .../examples/helloworldserver/Makefile | 6 - .../examples/helloworldserver/README.md | 17 - .../helloworldserver/helloworldserver.go | 55 --- .../fasthttp/examples/host_client/Makefile | 6 - .../fasthttp/examples/host_client/README.md | 13 - .../examples/host_client/hostclient.go | 35 -- .../examples/letsencrypt/letsencryptserver.go | 41 -- .../fasthttp/examples/multidomain/Makefile | 6 - .../fasthttp/examples/multidomain/README.md | 15 - .../examples/multidomain/multidomain.go | 63 --- .../fasthttputil/inmemory_listener_test.go | 273 ------------ .../inmemory_listener_timing_test.go | 155 ------- .../fasthttp/fasthttputil/pipeconns_test.go | 407 ------------------ .../valyala/fasthttp/testdata/test.png | 1 - vendor/golang.org/x/sys/unix/mkerrors.sh | 2 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 37 +- .../x/sys/windows/security_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 9 + vendor/modules.txt | 16 +- 111 files changed, 999 insertions(+), 2327 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der delete mode 100644 
vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem delete mode 100644 vendor/github.com/valyala/fasthttp/examples/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/client/Makefile delete mode 100644 vendor/github.com/valyala/fasthttp/examples/client/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/client/client.go delete mode 100644 vendor/github.com/valyala/fasthttp/examples/fileserver/Makefile delete mode 100644 vendor/github.com/valyala/fasthttp/examples/fileserver/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/fileserver/fileserver.go delete mode 100644 vendor/github.com/valyala/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem delete mode 100644 vendor/github.com/valyala/fasthttp/examples/helloworldserver/Makefile delete mode 100644 vendor/github.com/valyala/fasthttp/examples/helloworldserver/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/helloworldserver/helloworldserver.go delete mode 100644 vendor/github.com/valyala/fasthttp/examples/host_client/Makefile delete mode 100644 vendor/github.com/valyala/fasthttp/examples/host_client/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/host_client/hostclient.go delete mode 100644 
vendor/github.com/valyala/fasthttp/examples/letsencrypt/letsencryptserver.go delete mode 100644 vendor/github.com/valyala/fasthttp/examples/multidomain/Makefile delete mode 100644 vendor/github.com/valyala/fasthttp/examples/multidomain/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/examples/multidomain/multidomain.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_test.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_timing_test.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns_test.go delete mode 100644 vendor/github.com/valyala/fasthttp/testdata/test.png diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 9df7e2dfc06..d08e6a038ad 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. #------------------------------------------------------------------------------------------------------------- -FROM golang:1.21.9 +FROM golang:1.22.5 # Avoid warnings by switching to noninteractive ENV DEBIAN_FRONTEND=noninteractive @@ -16,8 +16,6 @@ ARG USERNAME=vscode ARG USER_UID=1000 ARG USER_GID=$USER_UID -ENV GO111MODULE=auto - # Configure apt, install packages and tools RUN apt-get update \ && apt-get -y install --no-install-recommends apt-utils dialog unzip 2>&1 \ @@ -25,32 +23,25 @@ RUN apt-get update \ # Verify git, process tools, lsb-release (common in install instructions for CLIs) installed && apt-get -y install git iproute2 procps lsb-release \ # - # Install gocode-gomod - && go get -x -d github.com/stamblerre/gocode 2>&1 \ - && go build -o gocode-gomod github.com/stamblerre/gocode \ - && mv gocode-gomod $GOPATH/bin/ \ - # # Install Go tools - && go get -u -v \ - github.com/mdempsky/gocode \ - github.com/uudashr/gopkgs/cmd/gopkgs \ - github.com/ramya-rao-a/go-outline \ - github.com/acroca/go-symbols \ - github.com/godoctor/godoctor \ - golang.org/x/tools/cmd/gorename \ - github.com/rogpeppe/godef \ - github.com/zmb3/gogetdoc \ - github.com/haya14busa/goplay/cmd/goplay \ - github.com/sqs/goreturns \ - github.com/josharian/impl \ - github.com/davidrjenni/reftools/cmd/fillstruct \ - github.com/fatih/gomodifytags \ - github.com/cweill/gotests/... 
\ - golang.org/x/tools/cmd/goimports \ - golang.org/x/lint/golint \ - github.com/alecthomas/gometalinter 2>&1 \ - github.com/mgechev/revive \ - github.com/derekparker/delve/cmd/dlv 2>&1 \ + && go install github.com/uudashr/gopkgs/v2/cmd/gopkgs@latest \ + && go install github.com/ramya-rao-a/go-outline@latest \ + && go install github.com/acroca/go-symbols@latest \ + && go install github.com/godoctor/godoctor@latest \ + && go install golang.org/x/tools/cmd/gorename@latest \ + && go install github.com/rogpeppe/godef@latest \ + && go install github.com/zmb3/gogetdoc@latest \ + && go install github.com/haya14busa/goplay/cmd/goplay@latest \ + && go install github.com/sqs/goreturns@latest \ + && go install github.com/josharian/impl@latest \ + && go install github.com/davidrjenni/reftools/cmd/fillstruct@latest \ + && go install github.com/fatih/gomodifytags@latest \ + && go install github.com/cweill/gotests/...@latest \ + && go install golang.org/x/tools/cmd/goimports@latest \ + && go install golang.org/x/lint/golint@latest \ + && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest \ + && go install github.com/mgechev/revive@latest \ + && go install github.com/go-delve/delve/cmd/dlv@latest \ && go install honnef.co/go/tools/cmd/staticcheck@latest \ && go install golang.org/x/tools/gopls@latest \ # Protocol Buffer Compiler @@ -61,8 +52,6 @@ RUN apt-get update \ && mv $HOME/.local/bin/protoc /usr/local/bin/protoc \ && mv $HOME/.local/include/ /usr/local/bin/include/ \ && protoc --version \ - # Install golangci-lint - && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2 \ # # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
&& groupadd --gid $USER_GID $USERNAME \ @@ -91,9 +80,6 @@ RUN apt-get update \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* -# Enable go modules -ENV GO111MODULE=on - ENV OPERATOR_RELEASE_VERSION=v1.26.0 RUN ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac) \ && OS=$(uname | awk '{print tolower($0)}') \ diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index a7db0620dbd..7fd93ee6752 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: - go-version: "1.21" + go-version: "1.22" - run: go version - name: Get branch name id: branch-name diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml index 59e363dd4d1..39be7f802da 100644 --- a/.github/workflows/main-build.yml +++ b/.github/workflows/main-build.yml @@ -13,7 +13,7 @@ jobs: id-token: write # needed for signing the images with GitHub OIDC Token **not production ready** # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 steps: - name: Check out code uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 @@ -41,7 +41,7 @@ jobs: key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }} - name: Go modules sync - run: go mod tidy -compat=1.21 + run: go mod tidy -compat=1.22 - name: Test run: make test diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index b664b7b8b9b..f0938c38e2e 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -10,7 +10,7 @@ jobs: validate: name: validate - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 strategy: matrix: include: @@ -54,7 +54,7 @@ jobs: key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }} - name: Go modules sync - run: go mod tidy -compat=1.21 + run: go mod tidy -compat=1.22 - name: Verify generated Clientset is up to date run: make clientset-verify @@ -77,9 +77,9 @@ jobs: validate-dockerfiles: name: validate-dockerfiles - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 strategy: - matrix: + matrix: include: - runner: ARM64 name: arm64 @@ -108,9 +108,9 @@ jobs: validate-dev-container: name: Validate dev-container - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 strategy: - matrix: + matrix: include: - runner: ARM64 name: arm64 @@ -143,7 +143,7 @@ jobs: python-version: 3.x - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: - go-version: "1.21" + go-version: "1.22" - name: Get golangci run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2 - uses: pre-commit/action@v3.0.0 diff --git a/.github/workflows/release-build.yml b/.github/workflows/release-build.yml index 88ae5c858ed..0e4523c6979 100644 --- a/.github/workflows/release-build.yml +++ b/.github/workflows/release-build.yml @@ -13,7 +13,7 @@ jobs: id-token: write # needed for signing the images with GitHub OIDC Token **not production 
ready** # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 steps: - name: Check out code uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 @@ -41,7 +41,7 @@ jobs: key: ${{ runner.os }}-go-build-cache-${{ hashFiles('**/go.sum') }} - name: Go modules sync - run: go mod tidy -compat=1.21 + run: go mod tidy -compat=1.22 - name: Login to GitHub Container Registry uses: docker/login-action@v3 diff --git a/.github/workflows/static-analysis-codeql.yml b/.github/workflows/static-analysis-codeql.yml index 9181358e172..076a7857ad3 100644 --- a/.github/workflows/static-analysis-codeql.yml +++ b/.github/workflows/static-analysis-codeql.yml @@ -13,7 +13,7 @@ jobs: codeQl: name: Analyze CodeQL Go runs-on: ubuntu-latest - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 if: (github.actor != 'dependabot[bot]') steps: - name: Checkout repository diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml index de0e71159d0..e08a5b19c63 100644 --- a/.github/workflows/template-main-e2e-test.yml +++ b/.github/workflows/template-main-e2e-test.yml @@ -8,7 +8,7 @@ jobs: name: Run e2e test runs-on: ARM64 # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.22.5 concurrency: e2e-tests steps: - name: Check out code diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml index 582d0a14f0d..fb4da3a8f68 100644 --- a/.github/workflows/template-smoke-tests.yml +++ b/.github/workflows/template-smoke-tests.yml @@ -20,7 +20,7 @@ jobs: - name: Setup Go uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5 with: - go-version: "1.21" + go-version: "1.22" - name: Install prerequisites run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 3014dc32694..17beb5e715b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -52,6 +52,10 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## Unreleased +- **General**: Fix CVE-2024-24790, CVE-2024-24789, and CVE-2024-24791 in stdlib. 
+- **General**: Fix CVE-2024-35255 in github.com/Azure/azure-sdk-for-go/sdk/azidentity +- **General**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp + ### Breaking Changes - **Authentication:** AAD-Pod-Identity and AWS-KIAM auths have been removed ([#5035](https://github.com/kedacore/keda/issues/5035)|[#5085](https://github.com/kedacore/keda/issues/5085)) diff --git a/Dockerfile b/Dockerfile index 07c0e63fb58..4835dc5c082 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.22.5 AS builder ARG BUILD_VERSION=main ARG GIT_COMMIT=HEAD diff --git a/Dockerfile.adapter b/Dockerfile.adapter index ba961577589..95279038e21 100644 --- a/Dockerfile.adapter +++ b/Dockerfile.adapter @@ -1,5 +1,5 @@ # Build the adapter binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.22.5 AS builder ARG BUILD_VERSION=main ARG GIT_COMMIT=HEAD diff --git a/Dockerfile.webhooks b/Dockerfile.webhooks index c2a4c172ea0..5959c41afb0 100644 --- a/Dockerfile.webhooks +++ b/Dockerfile.webhooks @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.22.5 AS builder ARG BUILD_VERSION=main ARG GIT_COMMIT=HEAD diff --git a/go.mod b/go.mod index bd0c6ebc1ec..6f0f5867438 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kedacore/keda/v2 -go 1.21 +go 1.22 require ( cloud.google.com/go/compute/metadata v0.3.0 @@ -11,10 +11,9 @@ require ( github.com/Azure/azure-amqp-common-go/v4 v4.2.0 github.com/Azure/azure-kusto-go v0.15.2 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.1.0 @@ -118,7 +117,10 @@ require ( sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 ) -require filippo.io/edwards25519 v1.1.0 // indirect +require ( + filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect +) // Remove this when they merge the PR and cut a release https://github.com/open-policy-agent/cert-controller/pull/202 replace github.com/open-policy-agent/cert-controller => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 @@ -172,8 +174,8 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/iam v1.1.7 // indirect code.cloudfoundry.org/clock v1.1.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest 
v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect @@ -344,13 +346,13 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.23.0 + golang.org/x/crypto v0.24.0 golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.20.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/go.sum b/go.sum index 5b72c848fcc..b4733ecaa1f 100644 --- a/go.sum +++ b/go.sum @@ -1341,17 +1341,17 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbL github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 h1:rTfKOCZGy5ViVrlA74ZPE99a+SgoEE2K/yg3RyW9dFA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 h1:ANFaLubuHo9lLoee/1La180t1frTwd+0FcaQh2GTlg8= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0/go.mod h1:x/yvhJANijv4JJOq6ql0QKMY2pP9zmjeWcSrZsRn9RY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 h1:d7S13DPk63SvBJfSUiMJJ26tRsvrBumkLPEfQEAarGk= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0/go.mod h1:7e/gsXp4INB4k/vg0h3UOkYpDK6oZqctxr+L05FGybg= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1/go.mod h1:vMGz6NOUGJ9h5ONl2kkyaqq5E0g7s4CHNSrXN5fl8UY= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= -github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 h1:QISzMrspEvZj4zrrN2wlNwfum5RmnKQhQNiSujwH7oU= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0/go.mod h1:xNjFERdhyMqZncbNJSPBsTCddk5kwsUVUzELQPMj/LA= github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0 
h1:l+LIDHsZkFBiipIKhOn3m5/2MX4bwNwHYWyNulPaTis= github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery v1.1.0/go.mod h1:BjVVBLUiZ/qR2a4PAhjs8uGXNfStD0tSxgxCMfcVRT8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 h1:+dggnR89/BIIlRlQ6d19dkhhdd/mQUiQbXhyHUFiB4w= @@ -2665,8 +2665,8 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore new file mode 100644 index 00000000000..8cdb9103650 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore @@ -0,0 +1,4 @@ +# live test artifacts +Dockerfile +k8s.yaml +sshkey* diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index f6749c03059..a8c2feb6d47 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,59 @@ # Release History +## 1.7.0 (2024-06-20) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipelines service connection with + workload identity federation + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Removed the persistent token caching API. It will return in v1.8.0-beta.1 + +## 1.7.0-beta.1 (2024-06-10) + +### Features Added +* Restored `AzurePipelinesCredential` and persistent token caching API + +## Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Values which `NewAzurePipelinesCredential` read from environment variables in + prior versions are now parameters +* Renamed `AzurePipelinesServiceConnectionCredentialOptions` to `AzurePipelinesCredentialOptions` + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0 (2024-06-10) + +### Features Added +* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential + that authenticates with client assertions such as federated credentials + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Removed `AzurePipelinesCredential` and the persistent token caching API. 
+ They will return in v1.7.0-beta.1 + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0-beta.4 (2024-05-14) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with + workload identity federation + +## 1.6.0-beta.3 (2024-04-09) + +### Breaking Changes +* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity + environments to avoid excessive retry delays when the IMDS endpoint is not available. This + should improve credential chain resolution for local development scenarios. + +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + ## 1.5.2 (2024-04-09) ### Bugs Fixed @@ -9,6 +63,28 @@ * Restored v1.4.0 error behavior for empty tenant IDs * Upgraded dependencies +## 1.6.0-beta.2 (2024-02-06) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.1 +* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct + type that carries the `TokenRequestOptions` passed to the `GetToken` call which + returned the error. + +### Bugs Fixed +* Fixed more cases in which credential chains like `DefaultAzureCredential` + should try their next credential after attempting managed identity + authentication in a Docker Desktop container + +### Other Changes +* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration + +## 1.6.0-beta.1 (2024-01-17) + +### Features Added +* Restored persistent token caching API first added in v1.5.0-beta.1 +* Added `AzureCLICredentialOptions.Subscription` + ## 1.5.1 (2024-01-17) ### Bugs Fixed @@ -135,7 +211,7 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for - [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation). This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 1a649202303..4404be82449 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. 
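For orientation before the guide's side-by-side samples: a minimal, hedged sketch of the azidentity pattern the migration guide describes — a service principal credential requesting a token with an OAuth 2 scope. This is illustrative only and not part of the vendored docs; the tenant ID, client ID, secret, and scope below are placeholders.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// ClientSecretCredential is the azidentity counterpart of adal's
	// service principal token. All IDs here are placeholders.
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
	if err != nil {
		log.Fatal(err)
	}
	// azidentity takes OAuth 2 scopes rather than adal's resource URIs.
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires:", tk.ExpiresOn)
}
```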
This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index b6ad2d39f84..7e201ea2fdb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -30,7 +30,7 @@ When debugging and executing code locally, developers typically use their own ac #### Authenticating via the Azure CLI `DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user -signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. +signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. @@ -69,14 +69,14 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. 
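To make the local-development flow above concrete: a minimal sketch (not part of the vendored README) of picking up the `az login` session with `AzureCLICredential`. The scope is a placeholder and error handling is abbreviated.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// AzureCLICredential reuses the account signed in via `az login`;
	// DefaultAzureCredential would reach it after trying other sources.
	cred, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Request a token for Azure Resource Manager (placeholder scope).
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token acquired; expires:", tk.ExpiresOn)
}
```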
## Managed Identity `DefaultAzureCredential` and `ManagedIdentityCredential` support -[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview) in any hosting environment which supports managed identities, such as (this list is not exhaustive): -* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) -* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) -* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) -* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) -* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) -* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) +* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token) ## Examples @@ -140,6 +140,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- +|[AzurePipelinesCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzurePipelinesCredential)|Authenticate an Azure Pipelines [service connection](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) |[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion |[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate |[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret @@ -207,7 +208,7 @@ For more details, see the [token caching documentation](https://aka.ms/azsdk/go/ Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). -For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes). 
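A hedged sketch of the error-handling guidance above (not part of the vendored README): credentials return an `error`, and `AuthenticationFailedError` carries the raw Microsoft Entra ID response, including any AADSTS code. The scope below is a placeholder.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// classify prints details when err is azidentity's authentication failure.
func classify(err error) {
	var authFailed *azidentity.AuthenticationFailedError
	if errors.As(err, &authFailed) {
		// RawResponse, when present, carries the HTTP response from
		// Microsoft Entra ID with the AADSTS error code in its body.
		if authFailed.RawResponse != nil {
			fmt.Println("Entra ID status:", authFailed.RawResponse.StatusCode)
		}
		fmt.Println("authentication failed:", authFailed.Error())
	}
}

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"}, // placeholder scope
	})
	if err != nil {
		classify(err)
	}
}
```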
### Logging diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index c0d6601469c..fbaa2922048 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -45,7 +45,7 @@ With persistent disk token caching enabled, the library first determines if a va #### Example code -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. ### Credentials supporting token caching @@ -57,6 +57,7 @@ The following table indicates the state of in-memory and persistent caching in e |--------------------------------|---------------------------------------------------------------------|--------------------------| | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `AzurePipelinesCredential` | Supported | Supported | | `ClientAssertionCredential` | Supported | Supported | | `ClientCertificateCredential` | Supported | Supported | | `ClientSecretCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 832c599eb90..54016a07098 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -10,6 +10,7 @@ This troubleshooting guide covers failure investigation techniques, common error - [Enable and configure logging](#enable-and-configure-logging) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) +- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -58,7 +59,7 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. 
[Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
+- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
 - __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures.
@@ -97,17 +98,17 @@ azlog.SetEvents(azidentity.EventAuthentication)
 | Error Code | Issue | Mitigation |
 |---|---|---|
-|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|

## Troubleshoot ClientCertificateCredential authentication issues

 | Error Code | Description | Mitigation |
 |---|---|---|
-|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|

## Troubleshoot UsernamePasswordCredential authentication issues

@@ -123,20 +124,20 @@ azlog.SetEvents(azidentity.EventAuthentication)
 |Host Environment| | |
 |---|---|---|
-|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
-|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
+|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
+|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
 |Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
-|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
-|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)||
+|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
+|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
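Alongside the hosting-environment table above, a hedged sketch (not part of the vendored guide) of requesting a token with a managed identity. The client ID and scope are placeholders; on a host without a managed identity this call fails in the ways the next table describes.

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// For a user-assigned identity, pass its client ID (placeholder below);
	// pass nil options to use the system-assigned identity instead.
	opts := &azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("user-assigned-client-id"),
	}
	cred, err := azidentity.NewManagedIdentityCredential(opts)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"}, // placeholder scope
	})
	if err != nil {
		log.Fatal(err) // e.g. no response from IMDS; see the troubleshooting table
	}
	log.Println("token expires:", tk.ExpiresOn)
}
```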
### Azure Virtual Machine managed identity

| Error Message |Description| Mitigation |
|---|---|---|
-|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct. If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct. If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
 |The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
-|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
-|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|• Refer to the error message for more details on specific failures.<br>• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
+|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|
+|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|• Refer to the error message for more details on specific failures.<br>• Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).<br>• Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.|

#### Verify IMDS is available on the VM

@@ -152,7 +153,7 @@ curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://man
 | Error Message |Description| Mitigation |
 |---|---|---|
-|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|• Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).<br>• Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.|
+|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|• Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).<br>• Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.|

#### Verify the App Service managed identity endpoint is available

@@ -177,8 +178,8 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
 | Error Message |Description| Mitigation |
 |---|---|---|
-|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|• Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).<br>• Validate the installation location is in the application's `PATH` environment variable.|
-|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|• Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).<br>• Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.|
+|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|• Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).<br>• Validate the installation location is in the application's `PATH` environment variable.|
+|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|• Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).<br>• Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.|

#### Verify the Azure CLI can obtain tokens

@@ -226,6 +227,15 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
 |---|---|
 |no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.<br>• If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.<br>• If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
+
+## Troubleshoot AzurePipelinesCredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.|
+| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
+|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
+
 ## Get additional help

 Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 1be55a4bdd3..bff0c44dac4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "go",
   "TagPrefix": "go/azidentity",
-  "Tag": "go/azidentity_98074050dc"
+  "Tag": "go/azidentity_087379b475"
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index 43577ab3c5f..b9976f5fede 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -35,9 +35,9 @@ type AzureCLICredentialOptions struct {
 	// logged in account can access.
 	AdditionallyAllowedTenants []string
-	// subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
+	// Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
 	// than the Azure CLI's current account.
-	subscription string
+	Subscription string
 	// TenantID identifies the tenant the credential should authenticate in.
 	// Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user.
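The hunk above exports the previously unexported `subscription` field. A hedged sketch of how a caller might use it (not part of the vendored code; the subscription name and scope are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Target a subscription other than the CLI's current default.
	cred, err := azidentity.NewAzureCLICredential(&azidentity.AzureCLICredentialOptions{
		Subscription: "my-subscription", // placeholder name or ID
	})
	if err != nil {
		log.Fatal(err)
	}
	tk, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token expires:", tk.ExpiresOn)
}
```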
@@ -68,9 +68,9 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } - for _, r := range cp.subscription { + for _, r := range cp.Subscription { if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { - return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription) } } if cp.TenantID != "" && !validTenantID(cp.TenantID) { @@ -97,7 +97,7 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ } c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription) if err == nil { at, err = c.createAccessToken(b) } @@ -163,26 +163,21 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes [] func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { t := struct { - AccessToken string `json:"accessToken"` - Authority string `json:"_authority"` - ClientID string `json:"_clientId"` - ExpiresOn string `json:"expiresOn"` - IdentityProvider string `json:"identityProvider"` - IsMRRT bool `json:"isMRRT"` - RefreshToken string `json:"refreshToken"` - Resource string `json:"resource"` - TokenType string `json:"tokenType"` - UserID string `json:"userId"` + AccessToken string `json:"accessToken"` + Expires_On int64 `json:"expires_on"` + ExpiresOn string `json:"expiresOn"` }{} err := json.Unmarshal(tk, &t) if err != nil { return azcore.AccessToken{}, err } - // the Azure CLI's "expiresOn" is local time - exp, err := time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) - if err != nil { - return azcore.AccessToken{}, fmt.Errorf("Error parsing token expiration time %q: %v", t.ExpiresOn, err) + exp := time.Unix(t.Expires_On, 0) + if t.Expires_On == 0 { + exp, err = time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local) + if err != nil { + return azcore.AccessToken{}, fmt.Errorf("%s: error parsing token expiration time %q: %v", credNameAzureCLI, t.ExpiresOn, err) + } } converted := azcore.AccessToken{ diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go new file mode 100644 index 00000000000..80c1806bb18 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +const ( + credNameAzurePipelines = "AzurePipelinesCredential" + oidcAPIVersion = "7.1" + systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" +) + +// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See +// [Azure Pipelines documentation] for more information. 
+// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation +type AzurePipelinesCredential struct { + connectionID, oidcURI, systemAccessToken string + cred *ClientAssertionCredential +} + +// AzurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential. +type AzurePipelinesCredentialOptions struct { + azcore.ClientOptions + + // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. + // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the + // application is registered. + AdditionallyAllowedTenants []string + + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool +} + +// NewAzurePipelinesCredential is the constructor for AzurePipelinesCredential. +// +// - tenantID: tenant ID of the service principal federated with the service connection +// - clientID: client ID of that service principal +// - serviceConnectionID: ID of the service connection to authenticate +// - systemAccessToken: security token for the running build. See [Azure Pipelines documentation] for +// an example showing how to get this value. +// +// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken +func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, systemAccessToken string, options *AzurePipelinesCredentialOptions) (*AzurePipelinesCredential, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + if clientID == "" { + return nil, errors.New("no client ID specified") + } + if serviceConnectionID == "" { + return nil, errors.New("no service connection ID specified") + } + if systemAccessToken == "" { + return nil, errors.New("no system access token specified") + } + u := os.Getenv(systemOIDCRequestURI) + if u == "" { + return nil, fmt.Errorf("no value for environment variable %s. This should be set by Azure Pipelines", systemOIDCRequestURI) + } + a := AzurePipelinesCredential{ + connectionID: serviceConnectionID, + oidcURI: u, + systemAccessToken: systemAccessToken, + } + if options == nil { + options = &AzurePipelinesCredentialOptions{} + } + caco := ClientAssertionCredentialOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + cred, err := NewClientAssertionCredential(tenantID, clientID, a.getAssertion, &caco) + if err != nil { + return nil, err + } + cred.client.name = credNameAzurePipelines + a.cred = cred + return &a, nil +} + +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. 
+func (a *AzurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := a.cred.GetToken(ctx, opts) + return tk, err +} + +func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, error) { + url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID + url, err := runtime.EncodeQueryParams(url) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + } + req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + res, err := doForClient(a.cred.client.azClient, req) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + } + if res.StatusCode != http.StatusOK { + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + // include the response because its body, if any, probably contains an error message. + // OK responses aren't included with errors because they probably contain secrets + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + } + b, err := runtime.Payload(res) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + } + var r struct { + OIDCToken string `json:"oidcToken"` + } + err = json.Unmarshal(b, &r) + if err != nil { + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + } + return r.OIDCToken, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index dc855edf786..6c35a941b97 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -86,7 +86,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token errs []error successfulCredential azcore.TokenCredential token azcore.AccessToken - unavailableErr *credentialUnavailableError + unavailableErr credentialUnavailable ) for _, cred := range c.sources { token, err = cred.GetToken(ctx, opts) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index d077682c5c2..4cd8c514473 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -8,7 +8,7 @@ trigger: - release/* paths: include: - - sdk/azidentity/ + - sdk/azidentity/ pr: branches: @@ -19,17 +19,28 @@ pr: - release/* paths: include: - - sdk/azidentity/ + - sdk/azidentity/ -stages: -- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml - parameters: - RunLiveTests: true - UsePipelineProxy: false - ServiceDirectory: 'azidentity' - CloudConfig: - Public: - 
SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - # Contains alternate tenant, AAD app and cert info for testing - - $(sub-config-identity-test-resources) +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + CloudConfig: + Public: + SubscriptionConfigurations: + - $(sub-config-azure-cloud-test-resources) + - $(sub-config-identity-test-resources) + EnvVars: + SYSTEM_ACCESSTOKEN: $(System.AccessToken) + RunLiveTests: true + ServiceDirectory: azidentity + UsePipelineProxy: false + + ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + MatrixConfigs: + - Name: managed_identity_matrix + GenerateVMJobs: true + Path: sdk/azidentity/managed-identity-matrix.json + Selection: sparse + MatrixReplace: + - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi + - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index fc3df68eb19..b588750ef33 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -23,7 +23,7 @@ const credNameAssertion = "ClientAssertionCredential" // the most common assertion scenario, authenticating a service principal with a certificate. See // [Microsoft Entra ID documentation] for details of the assertion format. // -// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/certificate-credentials#assertion-format type ClientAssertionCredential struct { client *confidentialClient } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 607533f486e..80cd96b560f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -51,7 +51,8 @@ type ClientCertificateCredential struct { client *confidentialClient } -// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. +// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. See +// [ParseCertificates] for help loading a certificate. func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) { if len(certs) == 0 { return nil, errors.New("at least one certificate is required") @@ -86,8 +87,10 @@ func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy. return tk, err } -// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. -// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format. +// ParseCertificates loads certificates and a private key, in PEM or PKCS#12 format, for use with [NewClientCertificateCredential]. 
+// Pass nil for password if the private key isn't encrypted. This function has limitations, for example it can't decrypt keys in
+// PEM format or PKCS#12 certificates that use SHA256 for message authentication. If you encounter such limitations, consider
+// using another module to load the certificate and private key.
 func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) {
 	var blocks []*pem.Block
 	var err error
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 854267bdbfd..3bd08c685fb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -91,7 +91,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 		}
 		tro.TenantID = tenant
 	}
-	client, mu, err := c.client(ctx, tro)
+	client, mu, err := c.client(tro)
 	if err != nil {
 		return azcore.AccessToken{}, err
 	}
@@ -109,7 +109,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 	if err != nil {
 		// We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
 		// We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
-		var unavailableErr *credentialUnavailableError
+		var unavailableErr credentialUnavailable
 		if !errors.As(err, &unavailableErr) {
 			res := getResponseFromError(err)
 			err = newAuthenticationFailedError(c.name, err.Error(), res, err)
@@ -121,7 +121,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
 	return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
 }
-func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
+func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
 	c.clientMu.Lock()
 	defer c.clientMu.Unlock()
 	if tro.EnableCAE {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index 35aeef86747..551d3199462 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -8,10 +8,8 @@ package azidentity
 import (
 	"context"
-	"errors"
 	"os"
 	"strings"
-	"time"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
@@ -98,13 +96,13 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
 		creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
 	}
-	o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions}
+	o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
 	if ID, ok := os.LookupEnv(azureClientID); ok {
 		o.ID = ClientID(ID)
 	}
 	miCred, err := NewManagedIdentityCredential(o)
 	if err == nil {
-		creds = append(creds, &timeoutWrapper{mic: miCred, timeout: time.Second})
+		creds = append(creds, miCred)
 	} else {
 		errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
 		creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
@@ -158,51 +156,10 @@ type defaultCredentialErrorReporter struct {
 }
 func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
-	if _, ok := d.err.(*credentialUnavailableError); ok {
+	if _, ok := d.err.(credentialUnavailable); ok {
 		return azcore.AccessToken{}, d.err
 	}
 	return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error())
 }
 var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil)
-
-// timeoutWrapper prevents a potentially very long timeout when managed identity isn't available
-type timeoutWrapper struct {
-	mic *ManagedIdentityCredential
-	// timeout applies to all auth attempts until one doesn't time out
-	timeout time.Duration
-}
-
-// GetToken wraps DefaultAzureCredential's initial managed identity auth attempt with a short timeout
-// because managed identity may not be available and connecting to IMDS can take several minutes to time out.
-func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
-	var tk azcore.AccessToken
-	var err error
-	// no need to synchronize around this value because it's written only within ChainedTokenCredential's critical section
-	if w.timeout > 0 {
-		c, cancel := context.WithTimeout(ctx, w.timeout)
-		defer cancel()
-		tk, err = w.mic.GetToken(c, opts)
-		if isAuthFailedDueToContext(err) {
-			err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information")
-		} else {
-			// some managed identity implementation is available, so don't apply the timeout to future calls
-			w.timeout = 0
-		}
-	} else {
-		tk, err = w.mic.GetToken(ctx, opts)
-	}
-	return tk, err
-}
-
-// unwraps nested AuthenticationFailedErrors to get the root error
-func isAuthFailedDueToContext(err error) bool {
-	for {
-		var authFailedErr *AuthenticationFailedError
-		if !errors.As(err, &authFailedErr) {
-			break
-		}
-		err = authFailedErr.err
-	}
-	return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
index d8b952f532e..be963d3a2af 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
@@ -19,7 +19,7 @@ const cliTimeout = 10 * time.Second
 // the next credential in its chain (another developer credential).
 func unavailableIfInChain(err error, inDefaultChain bool) error {
 	if err != nil && inDefaultChain {
-		var unavailableErr *credentialUnavailableError
+		var unavailableErr credentialUnavailable
 		if !errors.As(err, &unavailableErr) {
 			err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
 		}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
index 1b7a283703a..cd30bedd5e9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -34,8 +34,8 @@ type DeviceCodeCredentialOptions struct {
 	ClientID string
 	// disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
-	// When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user
-	// interaction is necessary to acquire a token.
+	// When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+	// to acquire a token.
 	disableAutomaticAuthentication bool
 	// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index 42f84875e23..b30f5474f55 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -57,6 +57,9 @@ type EnvironmentCredentialOptions struct {
 //
 // AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file.
 //
+// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
+// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
+//
 // # User with username and password
 //
 // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
@@ -121,7 +124,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme
 	}
 	certs, key, err := ParseCertificates(certData, password)
 	if err != nil {
-		return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err)
+		return nil, fmt.Errorf("failed to parse %q due to error %q. This may be due to a limitation of this module's certificate loader. Consider calling NewClientCertificateCredential instead", certPath, err.Error())
 	}
 	o := &ClientCertificateCredentialOptions{
 		AdditionallyAllowedTenants: additionalTenants,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
index 335d2b7dcf2..35fa01d136e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -13,15 +13,12 @@ import (
 	"fmt"
 	"net/http"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
 	"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
 	msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
 )
-// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token
-// because user interaction is required and the credential is configured not to automatically prompt the user.
-var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. Call Authenticate to interactively authenticate a user"}
-
 // getResponseFromError retrieves the response carried by
 // an AuthenticationFailedError or MSAL CallErr, if any
 func getResponseFromError(err error) *http.Response {
@@ -56,7 +53,7 @@ func (e *AuthenticationFailedError) Error() string {
 		return e.credType + ": " + e.message
 	}
 	msg := &bytes.Buffer{}
-	fmt.Fprintf(msg, e.credType+" authentication failed\n")
+	fmt.Fprintf(msg, "%s authentication failed. %s\n", e.credType, e.message)
 	if e.RawResponse.Request != nil {
 		fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
 	} else {
@@ -86,6 +83,8 @@ func (e *AuthenticationFailedError) Error() string {
 	case credNameAzureCLI:
 		anchor = "azure-cli"
 	case credNameAzureDeveloperCLI:
 		anchor = "azd"
+	case credNameAzurePipelines:
+		anchor = "apc"
 	case credNameCert:
 		anchor = "client-cert"
 	case credNameSecret:
@@ -110,8 +109,34 @@ func (*AuthenticationFailedError) NonRetriable() {
 var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
-// credentialUnavailableError indicates a credential can't attempt authentication because it lacks required
-// data or state
+// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
+// because the credential requires user interaction and is configured not to request it automatically.
+type authenticationRequiredError struct {
+	credentialUnavailableError
+
+	// TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
+	TokenRequestOptions policy.TokenRequestOptions
+}
+
+func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
+	return &authenticationRequiredError{
+		credentialUnavailableError: credentialUnavailableError{
+			credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
+		},
+		TokenRequestOptions: tro,
+	}
+}
+
+var (
+	_ credentialUnavailable  = (*authenticationRequiredError)(nil)
+	_ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
+)
+
+type credentialUnavailable interface {
+	error
+	credentialUnavailable()
+}
+
 type credentialUnavailableError struct {
 	message string
 }
@@ -135,6 +160,11 @@ func (e *credentialUnavailableError) Error() string {
 }
 // NonRetriable is a marker method indicating this error should not be retried. It has no implementation.
-func (e *credentialUnavailableError) NonRetriable() {}
+func (*credentialUnavailableError) NonRetriable() {}
+
+func (*credentialUnavailableError) credentialUnavailable() {}
-var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
+var (
+	_ credentialUnavailable  = (*credentialUnavailableError)(nil)
+	_ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
index 65bcba7dfea..c592f283b6b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
@@ -3,12 +3,20 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9an
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -16,14 +24,19 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -35,7 +48,13 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
index bd829698375..056785a8a33 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
@@ -33,8 +33,8 @@ type InteractiveBrowserCredentialOptions struct {
 	ClientID string
 	// disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
-	// When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when
-	// user interaction is necessary to acquire a token.
+	// When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+	// to acquire a token.
disableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json new file mode 100644 index 00000000000..1c3791777a1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -0,0 +1,17 @@ +{ + "include": [ + { + "Agent": { + "msi_image": { + "ArmTemplateParameters": "@{deployResources = $true}", + "OSVmImage": "env:LINUXNEXTVMIMAGE", + "Pool": "env:LINUXPOOL" + } + }, + "GoVersion": [ + "1.22.1" + ], + "IDENTITY_IMDS_AVAILABLE": "1" + } + ] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index d129a1e91c2..6122cc70050 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -14,13 +14,15 @@ import ( "net/http" "net/url" "os" + "path/filepath" + "runtime" "strconv" "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" @@ -44,6 +46,8 @@ const ( serviceFabricAPIVersion = "2019-07-01-preview" ) +var imdsProbeTimeout = time.Second + type msiType int const ( @@ -55,13 +59,28 @@ const ( msiTypeServiceFabric ) -// managedIdentityClient provides the base for authenticating in managed identity environments -// This type includes an runtime.Pipeline and TokenCredentialOptions. 
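// managedIdentityClient couples the HTTP pipeline with the resolved managed
// identity configuration. probeIMDS is set only when DefaultAzureCredential
// constructed the client and no hosting-environment variable selected a
// specific managed identity API; the first request then becomes a
// short-timeout probe (see imdsProbeTimeout) so an unreachable IMDS endpoint
// fails fast instead of stalling the credential chain.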
type managedIdentityClient struct { - azClient *azcore.Client - msiType msiType - endpoint string - id ManagedIDKind + azClient *azcore.Client + endpoint string + id ManagedIDKind + msiType msiType + probeIMDS bool +} + +// arcKeyDirectory returns the directory expected to contain Azure Arc keys +var arcKeyDirectory = func() (string, error) { + switch runtime.GOOS { + case "linux": + return "/var/opt/azcmagent/tokens", nil + case "windows": + pd := os.Getenv("ProgramData") + if pd == "" { + return "", errors.New("environment variable ProgramData has no value") + } + return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil + default: + return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) + } } type wrappedNumber json.Number @@ -88,7 +107,7 @@ func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { if o.StatusCodes == nil { o.StatusCodes = []int{ // IMDS docs recommend retrying 404, 410, 429 and 5xx - // https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + // https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#error-handling http.StatusNotFound, // 404 http.StatusGone, // 410 http.StatusTooManyRequests, // 429 @@ -147,11 +166,12 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeCloudShell } } else { + c.probeIMDS = options.dac setIMDSRetryOptionDefaults(&cp.Retry) } - client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ - Tracing: runtime.TracingOptions{ + client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + Tracing: azruntime.TracingOptions{ Namespace: traceNamespace, }, }, &cp) @@ -180,6 +200,27 @@ func (c *managedIdentityClient) provideToken(ctx context.Context, params confide // authenticate acquires an access token func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { + // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, + // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block + if c.probeIMDS { + cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) + defer cancel() + cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) + req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) + if err == nil { + _, err = c.azClient.Pipeline().Do(req) + } + if err != nil { + msg := err.Error() + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + msg = "managed identity timed out. 
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + } + // send normal token requests from now on because something responded + c.probeIMDS = false + } + msg, err := c.createAuthRequest(ctx, id, scopes) if err != nil { return azcore.AccessToken{}, err @@ -190,7 +231,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } - if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { return c.createAccessToken(resp) } @@ -201,15 +242,15 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) } msg := "failed to authenticate a system assigned identity" - if body, err := runtime.Payload(resp); err == nil && len(body) > 0 { + if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { msg += fmt.Sprintf(". The endpoint responded with %s", body) } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) case http.StatusForbidden: // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response, // we return credentialUnavailableError so credential chains continue to their next credential - body, err := runtime.Payload(resp) - if err == nil && strings.Contains(string(body), "A socket operation was attempted to an unreachable network") { + body, err := azruntime.Payload(resp) + if err == nil && strings.Contains(string(body), "unreachable") { return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } @@ -226,7 +267,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} - if err := runtime.UnmarshalAsJSON(res, &value); err != nil { + if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) } if value.ExpiresIn != "" { @@ -276,7 +317,7 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage } func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -296,7 +337,7 @@ func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id Ma } func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -316,7 +357,7 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, } func (c 
*managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -339,7 +380,7 @@ func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id } func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -362,7 +403,7 @@ func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Conte func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { // create the request to retreive the secret key challenge provided by the HIMDS service - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return "", err } @@ -384,22 +425,36 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour } header := response.Header.Get("WWW-Authenticate") if len(header) == 0 { - return "", errors.New("did not receive a value from WWW-Authenticate header") + return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) } // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key - pos := strings.LastIndex(header, "=") - if pos == -1 { - return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header) + _, p, found := strings.Cut(header, "=") + if !found { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) + } + expected, err := arcKeyDirectory() + if err != nil { + return "", err + } + if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { + return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) + } + f, err := os.Stat(p) + if err != nil { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) + } + if s := f.Size(); s > 4096 { + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) } - key, err := os.ReadFile(header[pos+1:]) + key, err := os.ReadFile(p) if err != nil { - return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err) + return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) } return string(key), nil } func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { - request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { return nil, err } @@ -421,7 +476,7 @@ func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, i } func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - 
request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint) + request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index dcd278befa1..13c043d8e0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -64,12 +64,19 @@ type ManagedIdentityCredentialOptions struct { // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that // some platforms don't accept resource IDs. ID ManagedIDKind + + // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have + // configuration for a specific managed identity API, the credential tries to determine whether IMDS is available before + // sending its first token request. It does this by sending a malformed request with a short timeout. Any response to that + // request is taken to mean IMDS is available, in which case the credential will send ordinary token requests thereafter + // with no special timeout. The purpose of this behavior is to prevent a very long timeout when IMDS isn't available. + dac bool } // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a // user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 5e67cf02145..9dcc82f013b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -10,6 +10,7 @@ import ( "context" "crypto" "crypto/x509" + "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" @@ -24,7 +25,7 @@ const credNameOBO = "OnBehalfOfCredential" // is not an interactive authentication flow, an application using it must have admin consent for any delegated // permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. 
// -// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -60,6 +61,19 @@ func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion st return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) } +// NewOnBehalfOfCredentialWithClientAssertions constructs an OnBehalfOfCredential that authenticates with client assertions. +// userAssertion is the user's access token for the application. The getAssertion function should return client assertions +// that authenticate the application to Microsoft Entra ID, such as federated credentials. +func NewOnBehalfOfCredentialWithClientAssertions(tenantID, clientID, userAssertion string, getAssertion func(context.Context) (string, error), options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { + if getAssertion == nil { + return nil, errors.New("getAssertion can't be nil. It must be a function that returns client assertions") + } + cred := confidential.NewCredFromAssertionCallback(func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) { + return getAssertion(ctx) + }) + return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options) +} + // NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret. func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) { cred, err := confidential.NewCredFromSecret(clientSecret) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 63c31190d18..b3d22dbf3ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -152,7 +152,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, errAuthenticationRequired + return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) } at, err := p.reqToken(ctx, client, tro) if err == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 new file mode 100644 index 00000000000..a69bbce34c4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. 
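# At a high level, this script builds the managed-identity test image and
# pushes it to the test ACR, runs it on Azure Container Instances, deploys the
# Windows binary to Azure Functions as a zipped custom handler, and creates a
# federated workload identity so an AKS pod can run the same image.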
+ +param ( + [hashtable] $AdditionalParameters = @{}, + [hashtable] $DeploymentOutputs +) + +$ErrorActionPreference = 'Stop' +$PSNativeCommandUseErrorActionPreference = $true + +if ($CI) { + if (!$AdditionalParameters['deployResources']) { + Write-Host "Skipping post-provisioning script because resources weren't deployed" + return + } + az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] + az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] +} + +Write-Host "Building container" +$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" +Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +ENV GOARCH=amd64 GOWORK=off +COPY . /azidentity +WORKDIR /azidentity/testdata/managed-id-test +RUN go mod tidy +RUN go build -o /build/managed-id-test . +RUN GOOS=windows go build -o /build/managed-id-test.exe . + +FROM mcr.microsoft.com/mirror/docker/library/alpine:3.16 +RUN apk add gcompat +COPY --from=builder /build/* . +RUN chmod +x managed-id-test +CMD ["./managed-id-test"] +"@ +# build from sdk/azidentity because we need that dir in the context (because the test app uses local azidentity) +docker build -t $image "$PSScriptRoot" +az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME'] +docker push $image + +$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP'] + +# ACI is easier to provision here than in the bicep file because the image isn't available before now +Write-Host "Deploying Azure Container Instance" +$aciName = "azidentity-test" +az container create -g $rg -n $aciName --image $image ` + --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --role "Storage Blob Data Reader" ` + --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` + -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" + +# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip +Write-Host "Deploying to Azure Functions" +$container = docker create $image +docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/" +docker rm -v $container +Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force +az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip + +Write-Host "Creating federated identity" +$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME'] +$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME'] +$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv +$podName = "azidentity-test" +$serviceAccountName = "workload-identity-sa" +az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName +Write-Host "Deploying to AKS" +az aks get-credentials -g $rg -n $aksName +az aks update --attach-acr 
$DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName +Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @" +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + azure.workload.identity/client-id: $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) + name: $serviceAccountName + namespace: default +--- +apiVersion: v1 +kind: Pod +metadata: + name: $podName + namespace: default + labels: + app: $podName + azure.workload.identity/use: "true" +spec: + serviceAccountName: $serviceAccountName + containers: + - name: $podName + image: $image + env: + - name: AZIDENTITY_STORAGE_NAME + value: $($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) + - name: AZIDENTITY_USE_WORKLOAD_IDENTITY + value: "true" + - name: FUNCTIONS_CUSTOMHANDLER_PORT + value: "80" + nodeSelector: + kubernetes.io/os: linux +"@ +kubectl apply -f "$PSScriptRoot/k8s.yaml" +Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 index fe0183addeb..58766d0a022 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -1,36 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root. + [CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] param ( + [hashtable] $AdditionalParameters = @{}, + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments ) +if (-not (Test-Path "$PSScriptRoot/sshkey.pub")) { + ssh-keygen -t rsa -b 4096 -f "$PSScriptRoot/sshkey" -N '' -C '' +} +$templateFileParameters['sshPubKey'] = Get-Content "$PSScriptRoot/sshkey.pub" + if (!$CI) { # TODO: Remove this once auto-cloud config downloads are supported locally Write-Host "Skipping cert setup in local testing mode" return } -if ($EnvironmentVariables -eq $null -or $EnvironmentVariables.Count -eq 0) { +if ($null -eq $EnvironmentVariables -or $EnvironmentVariables.Count -eq 0) { throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" } $tmp = $env:TEMP ? 
$env:TEMP : [System.IO.Path]::GetTempPath() $pfxPath = Join-Path $tmp "test.pfx" $pemPath = Join-Path $tmp "test.pem" -$sniPath = Join-Path $tmp "testsni.pfx" -Write-Host "Creating identity test files: $pfxPath $pemPath $sniPath" +Write-Host "Creating identity test files: $pfxPath $pemPath" [System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] -[System.Convert]::FromBase64String($EnvironmentVariables['SNI_CONTENTS']) | Set-Content -Path $sniPath -AsByteStream # Set for pipeline Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" -Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_SNI;]$sniPath" # Set for local $env:IDENTITY_SP_CERT_PFX = $pfxPath $env:IDENTITY_SP_CERT_PEM = $pemPath -$env:IDENTITY_SP_CERT_SNI = $sniPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index b3490d3b50a..2a216529309 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -1 +1,219 @@ -param baseName string +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +@description('Kubernetes cluster admin user name.') +param adminUser string = 'azureuser' + +@minLength(6) +@maxLength(23) +@description('The base resource name.') +param baseName string = resourceGroup().name + +@description('Whether to deploy resources. When set to false, this file deploys nothing.') +param deployResources bool = false + +param sshPubKey string = '' + +@description('The location of the resource. By default, this is the same as the resource group.') +param location string = resourceGroup().location + +// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles +var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') +var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1') + +resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource saUserAssigned 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) { + kind: 'StorageV2' + location: location + name: 'sa2${uniqueString(baseName)}' + properties: { + accessTier: 'Hot' + } + sku: { + name: 'Standard_LRS' + } +} + +resource usermgdid 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = if (deployResources) { + location: location + name: baseName +} + +resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, acrPull, 'containerInstance') + properties: { + principalId: deployResources ? 
usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: acrPull + } + scope: containerRegistry +} + +resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + scope: saUserAssigned + name: guid(resourceGroup().id, blobReader, usermgdid.id) + properties: { + principalId: deployResources ? usermgdid.properties.principalId : '' + principalType: 'ServicePrincipal' + roleDefinitionId: blobReader + } +} + +resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) { + name: guid(resourceGroup().id, blobReader, 'azfunc') + properties: { + principalId: deployResources ? azfunc.identity.principalId : '' + roleDefinitionId: blobReader + principalType: 'ServicePrincipal' + } + scope: sa +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) { + location: location + name: uniqueString(resourceGroup().id) + properties: { + adminUserEnabled: true + } + sku: { + name: 'Basic' + } +} + +resource farm 'Microsoft.Web/serverfarms@2021-03-01' = if (deployResources) { + kind: 'app' + location: location + name: '${baseName}_asp' + properties: {} + sku: { + capacity: 1 + family: 'B' + name: 'B1' + size: 'B1' + tier: 'Basic' + } +} + +resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { + identity: { + type: 'SystemAssigned, UserAssigned' + userAssignedIdentities: { + '${deployResources ? usermgdid.id : ''}': {} + } + } + kind: 'functionapp' + location: location + name: '${baseName}func' + properties: { + enabled: true + httpsOnly: true + keyVaultReferenceIdentity: 'SystemAssigned' + serverFarmId: farm.id + siteConfig: { + alwaysOn: true + appSettings: [ + { + name: 'AZIDENTITY_STORAGE_NAME' + value: deployResources ? sa.name : null + } + { + name: 'AZIDENTITY_STORAGE_NAME_USER_ASSIGNED' + value: deployResources ? saUserAssigned.name : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' + value: deployResources ? usermgdid.id : null + } + { + name: 'AzureWebJobsStorage' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' + } + { + name: 'FUNCTIONS_EXTENSION_VERSION' + value: '~4' + } + { + name: 'FUNCTIONS_WORKER_RUNTIME' + value: 'custom' + } + { + name: 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING' + value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? 
sa.listKeys().keys[0].value : ''}' + } + { + name: 'WEBSITE_CONTENTSHARE' + value: toLower('${baseName}-func') + } + ] + http20Enabled: true + minTlsVersion: '1.2' + } + } +} + +resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deployResources) { + name: baseName + location: location + identity: { + type: 'SystemAssigned' + } + properties: { + agentPoolProfiles: [ + { + count: 1 + enableAutoScaling: false + kubeletDiskType: 'OS' + mode: 'System' + name: 'agentpool' + osDiskSizeGB: 128 + osDiskType: 'Managed' + osSKU: 'Ubuntu' + osType: 'Linux' + type: 'VirtualMachineScaleSets' + vmSize: 'Standard_D2s_v3' + } + ] + dnsPrefix: 'identitytest' + enableRBAC: true + linuxProfile: { + adminUsername: adminUser + ssh: { + publicKeys: [ + { + keyData: sshPubKey + } + ] + } + } + oidcIssuerProfile: { + enabled: true + } + securityProfile: { + workloadIdentity: { + enabled: true + } + } + } +} + +output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : '' +output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : '' +output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : '' +output AZIDENTITY_FUNCTION_NAME string = deployResources ? azfunc.name : '' +output AZIDENTITY_STORAGE_ID string = deployResources ? sa.id : '' +output AZIDENTITY_STORAGE_NAME string = deployResources ? sa.name : '' +output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAssigned.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 9b9d7ae0d20..4305b5d3d80 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.5.2" + version = "v1.7.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go index fd89cab620c..08e8f005d2a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal/version.go @@ -14,5 +14,5 @@ const ( ModuleName = "azeventgrid" // ModuleVersion is the semantic version (see http://semver.org) of this module. 
- ModuleVersion = "v0.5.0" + ModuleVersion = "v0.4.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md index fc77f82e2d0..873680e7be8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher/README.md @@ -1,7 +1,5 @@ # Azure Event Grid Publisher Client Module for Go -**Please note this package has been moved to: [azeventgrid](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/eventgrid/azeventgrid).** - [Azure Event Grid](https://learn.microsoft.com/azure/event-grid/overview) is a highly scalable, fully managed Pub Sub message distribution service that offers flexible message consumption patterns. For more information about Event Grid see: [link](https://learn.microsoft.com/azure/event-grid/overview). The client in this package can publish events to [Event Grid topics](https://learn.microsoft.com/azure/event-grid/concepts). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md index 988db23a29c..796215fa484 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/CHANGELOG.md @@ -1,11 +1,5 @@ # Release History -## 1.7.1 (2024-05-20) - -### Bugs Fixed - -- Emulator strings should allow for hosts other than localhost (PR#22898) - ## 1.7.0 (2024-04-02) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go index 4d11f9f1882..ce0e2610dfc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn/conn.go @@ -100,13 +100,14 @@ func ParseConnectionString(connStr string) (ConnectionStringProperties, error) { } if csp.Emulator { + // check that they're only connecting to localhost endpointParts := strings.SplitN(csp.Endpoint, ":", 3) // allow for a port, if it exists. - if len(endpointParts) < 2 || endpointParts[0] != "sb" { - // there should always be at least two parts "sb:" and "//" + if len(endpointParts) < 2 || endpointParts[0] != "sb" || endpointParts[1] != "//localhost" { + // there should always be at least two parts "sb:" and "//localhost" // with an optional 3rd piece that's the port "1111". 
// (we don't need to validate it's a valid host since it's been through url.Parse() above) - return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb:// or sb://:, not %s", csp.Endpoint) + return ConnectionStringProperties{}, fmt.Errorf("UseDevelopmentEmulator=true can only be used with sb://localhost or sb://localhost:, not %s", csp.Endpoint) } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go index f3079017326..a2402e48ac3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/constants.go @@ -4,4 +4,4 @@ package internal // Version is the semantic version number -const Version = "v1.7.1" +const Version = "v1.7.0" diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfaddf3645fa6c0578b5b6955d65ac4c172..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 998 zcmXqLVt!=M#B^!_GZP~dlZfxTkgMw#Mx5CteAhsdS=K*t&xAY!UN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Gec7&Lt`^z^C)p%6J+ina%mHz5^^vyvNA9?G4eAQ zG%<29H8CnfQWlH+MyjTjz z2Bjk+Ub9TzT(Z$!`+w1f9a{SYn?h^$U$mDJKK^KOurlxGlhf8coXvjW^}bgt-rFjD znHJ|Xo9Wew;}%JlW_Kjsthc+chb^XXpU3LOdy_KCS4OTntgUFd?YsGkV{8gPCrIUU z7xZ1<|6Oe5g44&3J$ODp>G5YKW=00a#V!U;2J*mEAgjzGVIbBZvce&_^W7!+r|T*d zavIkkZ(n>dQ`LY6q(GR3)qt6i@xOr}h$qOxWx&zImXe>Fn2DZTf#J-^uui3sYst!# z{ImS~I{#0uS#WDz%!i-3yIxOGkACKhN$DyM2L{LbrkL@xJCpY3crHP)J~LHEx;nsZlX$z1Na++S~*x&F8L zSHsIX`Pf;-7fz4f$yxWfRV}c$Te2$X#22gkrUlwjtNp9{IiD}~zjo}z)1J=iJoj~z zPFk$n)~eK8B%5+0@w1g$N8H(l+-H4!LOMmyS_+%b`$^TD`}g=z?!9=Gts-l=GE2@y HTP6bly#{pd diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641bafb022339926786ab85b545f40ac665..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1147 zcmXqLVktLhVvb+H%*4pVB*OPMYW498DSV5WgVNVW|Jr8oCgimNFB_*;n@8JsUPeZ4 zRtAH{c0+ChPB!LH7B*p~&|nycgNMo4(NM@h03^fC!x>zfmseSqn3HNKV890w6yxnyM_06xZQcZ?W@M4y!->Y40>Qzg1y0 z&gNQpqF{|c>D+ZZ(rFIKEL_n{%!~|-iyLnkG+s4m+z3nvvdS!tD-9Z#Eo|&v(%3b4 zVzNoZ_g%=<^$R1;>=M3fAjvH2AGv2jo&gWY$-*qG2F#3%{|y8|JV6#N1CA!Pl>FSp z%sk}C2j(GQ()!XwH^ngbB~t_{rh@w&h5|n>_#Tt3odjy zuPCdY_u)fF^#8=jc^^+Q{aqN5`$@Gm({I-q6VU_LtQOD6naL>j(`V|D&;QI6%^$Dc z@$uH$=blqSKTV1FFaEpmVP=>@vvi*Y`l|mqO|ORNA0oIS8nOI z?NiA8%5=ljL@D29-OKpueEYz|zv5rHb554&taa#n7142tpUKWU?R-O_r^0)6j>G3Y aOB;Xxy<$H9NT-Uzt(mu6i*GEf3IzZ)c%0q< diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c85f9ff41ddfc709924c866350a727a4f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZa+?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW 
z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C z&5N-8^H(H$?psgMz!}^;s& z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) zTR0VF*I2Sr HHQW{e5x{wf diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c097512f20d09d2054c63fc0f715d7be24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 998 zcmXqLVt!=M#B^!_GZP~dlZZk6mxvepr#-!NVoqOL*`yn*LOZ7#@Un4gwRyCC=VfH% zW@RvFY&YaK;ACSCWnmL$3Jr#FICz+x9SwyH1VA$EJeHTH8JZfJT11KSnjmurkxQExm5_sxk(GhDiIJbd zpox)-sfm%1VO{t`EzVyWX$3+}`kJnrGQY>^=jnZyz4d%;|IQeuB_Gcfo1a{_>B5Fg zXYbI=4K*7QZdjc*51-)~EjRzyir2SPA5Z@zcTLeXV*g^*2f0P(7p>c z?c<>#uh>H(ryKBq6bQ4h8Za|5{x=W=@dR183^XGxLzc8JJvw;mpWjp+5Pu z!*lq#5Sb^i@`a>klJNjzx%iu)2*?h3yy-gEL(;p^g~mZo#n!wzh9Qua5C+wfRd ztarh_l9E{Oro7&WNVTl@3-~vy&EDU;)-f|r-8<&mQ>C=;``&D2+5V0(pW(g3o1}Sj z^gY793Mi&+ntK1`1tm%ENt=U{rb%k1PtbRHEhYTylqR?8HqOeuvA1fBY>pc~NUFTR KZE!sDzAgX``fm0B diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31d6802c7e68c188af8797c3a063894857..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1147 zcmXqLVktLhVvb+H%*4pVB%&$)xidhT`$ARBi)~L`p2_`@+!*`b5acj4ER7|Ts-W~ z`MCv&d6kBO2K*oqZXOPo%(Bel%=|nTUzhK>3dC_Z(le%bCn$9moio9jI@8{ zd;5%c{XT!(ig$;5K+$bpwI6ChVmse+UV1r`!RY5*U#A%ppFJxS>PX#w$sz7*X_R~A zg!|$g&z66XI$)aW-TrxDio@fp!U^sS#+*GdHMB|H?BTf!^g~J_=D$Y-Ot{~xpTg~G+{k6 Zdr{DieczoQDL-a%Ib3(Tsv+bGCjf+Ap-TV& diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk zCap+`USuL4V}qf=6kBV_EFAN zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C z&5N-8^H(H$?psgMz!}^;s& z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) zTR0VF*I2Sr HHQW{e5x{wf diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a26481..00000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742..00000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx 
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk zCap+`USuL4V}qf=6kBV_EFAN zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta 0 { - log.Printf("Starting HTTP server on %q", *addr) - go func() { - if err := fasthttp.ListenAndServe(*addr, requestHandler); err != nil { - log.Fatalf("error in ListenAndServe: %v", err) - } - }() - } - - // Start HTTPS server. - if len(*addrTLS) > 0 { - log.Printf("Starting HTTPS server on %q", *addrTLS) - go func() { - if err := fasthttp.ListenAndServeTLS(*addrTLS, *certFile, *keyFile, requestHandler); err != nil { - log.Fatalf("error in ListenAndServeTLS: %v", err) - } - }() - } - - log.Printf("Serving files from directory %q", *dir) - log.Printf("See stats at http://%s/stats", *addr) - - // Wait forever. - select {} -} - -func updateFSCounters(ctx *fasthttp.RequestCtx) { - // Increment the number of fsHandler calls. - fsCalls.Add(1) - - // Update other stats counters - resp := &ctx.Response - switch resp.StatusCode() { - case fasthttp.StatusOK: - fsOKResponses.Add(1) - fsResponseBodyBytes.Add(int64(resp.Header.ContentLength())) - case fasthttp.StatusNotModified: - fsNotModifiedResponses.Add(1) - case fasthttp.StatusNotFound: - fsNotFoundResponses.Add(1) - default: - fsOtherResponses.Add(1) - } -} - -// Various counters - see https://pkg.go.dev/expvar for details. -var ( - // Counter for total number of fs calls - fsCalls = expvar.NewInt("fsCalls") - - // Counters for various response status codes - fsOKResponses = expvar.NewInt("fsOKResponses") - fsNotModifiedResponses = expvar.NewInt("fsNotModifiedResponses") - fsNotFoundResponses = expvar.NewInt("fsNotFoundResponses") - fsOtherResponses = expvar.NewInt("fsOtherResponses") - - // Total size in bytes for OK response bodies served. 
- fsResponseBodyBytes = expvar.NewInt("fsResponseBodyBytes") -) diff --git a/vendor/github.com/valyala/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem b/vendor/github.com/valyala/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem deleted file mode 100644 index 93e77cd9569..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem +++ /dev/null @@ -1,17 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV -BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV -MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D -K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te -+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij -L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 -xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY -6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG -SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 -L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 -45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li -K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 -X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI -whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd ------END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/Makefile b/vendor/github.com/valyala/fasthttp/examples/helloworldserver/Makefile deleted file mode 100644 index ce30ae9fa57..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -helloworldserver: clean - go get -u github.com/valyala/fasthttp - go build - -clean: - rm -f helloworldserver diff --git a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/README.md b/vendor/github.com/valyala/fasthttp/examples/helloworldserver/README.md deleted file mode 100644 index 80e801ecf46..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# HelloWorld server example - -* Displays various request info. -* Sets response headers and cookies. -* Supports transparent compression. 
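For context, the transparent compression above is a one-line wrap of the request handler. A condensed sketch of the pattern the full example below uses (address and body text are illustrative):

```go
package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	// CompressHandler compresses the response transparently when the
	// client advertises a supported Accept-Encoding.
	h := fasthttp.CompressHandler(func(ctx *fasthttp.RequestCtx) {
		ctx.WriteString("Hello, world!")
	})
	if err := fasthttp.ListenAndServe(":8080", h); err != nil {
		log.Fatalf("error in ListenAndServe: %v", err)
	}
}
```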
- -# How to build - -``` -make -``` - -# How to run - -``` -./helloworldserver -addr=tcp.addr.to.listen:to -``` diff --git a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/helloworldserver.go b/vendor/github.com/valyala/fasthttp/examples/helloworldserver/helloworldserver.go deleted file mode 100644 index a22e0b78b57..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/helloworldserver/helloworldserver.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - - "github.com/valyala/fasthttp" -) - -var ( - addr = flag.String("addr", ":8080", "TCP address to listen to") - compress = flag.Bool("compress", false, "Whether to enable transparent response compression") -) - -func main() { - flag.Parse() - - h := requestHandler - if *compress { - h = fasthttp.CompressHandler(h) - } - - if err := fasthttp.ListenAndServe(*addr, h); err != nil { - log.Fatalf("Error in ListenAndServe: %v", err) - } -} - -func requestHandler(ctx *fasthttp.RequestCtx) { - fmt.Fprintf(ctx, "Hello, world!\n\n") - - fmt.Fprintf(ctx, "Request method is %q\n", ctx.Method()) - fmt.Fprintf(ctx, "RequestURI is %q\n", ctx.RequestURI()) - fmt.Fprintf(ctx, "Requested path is %q\n", ctx.Path()) - fmt.Fprintf(ctx, "Host is %q\n", ctx.Host()) - fmt.Fprintf(ctx, "Query string is %q\n", ctx.QueryArgs()) - fmt.Fprintf(ctx, "User-Agent is %q\n", ctx.UserAgent()) - fmt.Fprintf(ctx, "Connection has been established at %s\n", ctx.ConnTime()) - fmt.Fprintf(ctx, "Request has been started at %s\n", ctx.Time()) - fmt.Fprintf(ctx, "Serial request number for the current connection is %d\n", ctx.ConnRequestNum()) - fmt.Fprintf(ctx, "Your ip is %q\n\n", ctx.RemoteIP()) - - fmt.Fprintf(ctx, "Raw request is:\n---CUT---\n%s\n---CUT---", &ctx.Request) - - ctx.SetContentType("text/plain; charset=utf8") - - // Set arbitrary headers - ctx.Response.Header.Set("X-My-Header", "my-header-value") - - // Set cookies - var c fasthttp.Cookie - c.SetKey("cookie-name") - c.SetValue("cookie-value") - ctx.Response.Header.SetCookie(&c) -} diff --git a/vendor/github.com/valyala/fasthttp/examples/host_client/Makefile b/vendor/github.com/valyala/fasthttp/examples/host_client/Makefile deleted file mode 100644 index 161ab4454a0..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/host_client/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -host_client: clean - go get -u github.com/valyala/fasthttp - go build - -clean: - rm -f host_client diff --git a/vendor/github.com/valyala/fasthttp/examples/host_client/README.md b/vendor/github.com/valyala/fasthttp/examples/host_client/README.md deleted file mode 100644 index e40b3976ea5..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/host_client/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Host Client Example - -The HostClient is useful when calling an API from a single host. -The example also shows how to use URI. -You may create the parsed URI once and reuse it in many requests. -The URI has a username and password for Basic Auth but you may also set other parts i.e. `SetPath()`, `SetQueryString()`. 
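For instance, a condensed sketch of that flow (the path and query string here are illustrative placeholders):

```go
package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	uri := fasthttp.AcquireURI()
	uri.Parse(nil, []byte("http://localhost:8080/"))
	uri.SetUsername("Aladdin")
	uri.SetPassword("Open Sesame")
	uri.SetPath("/api/v1/items")   // adjust only the path per request
	uri.SetQueryString("limit=10") // and the query string

	req := fasthttp.AcquireRequest()
	req.SetURI(uri)          // the request copies the URI,
	fasthttp.ReleaseURI(uri) // so it can be released right away

	resp := fasthttp.AcquireResponse()
	hc := &fasthttp.HostClient{Addr: "localhost:8080"}
	if err := hc.Do(req, resp); err != nil {
		fmt.Println("connection error:", err)
	} else {
		fmt.Printf("Response: %s\n", resp.Body())
	}
	fasthttp.ReleaseRequest(req)
	fasthttp.ReleaseResponse(resp)
}
```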
- -# How to build and run -Start a web server on localhost:8080 then execute: - - make - ./host_client - diff --git a/vendor/github.com/valyala/fasthttp/examples/host_client/hostclient.go b/vendor/github.com/valyala/fasthttp/examples/host_client/hostclient.go deleted file mode 100644 index 997abd4c96a..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/host_client/hostclient.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/valyala/fasthttp" -) - -func main() { - // Get URI from a pool - url := fasthttp.AcquireURI() - url.Parse(nil, []byte("http://localhost:8080/")) - url.SetUsername("Aladdin") - url.SetPassword("Open Sesame") - - hc := &fasthttp.HostClient{ - Addr: "localhost:8080", // The host address and port must be set explicitly - } - - req := fasthttp.AcquireRequest() - req.SetURI(url) // copy url into request - fasthttp.ReleaseURI(url) // now you may release the URI - - req.Header.SetMethod(fasthttp.MethodGet) - resp := fasthttp.AcquireResponse() - err := hc.Do(req, resp) - fasthttp.ReleaseRequest(req) - if err == nil { - fmt.Printf("Response: %s\n", resp.Body()) - } else { - fmt.Fprintf(os.Stderr, "Connection error: %v\n", err) - } - fasthttp.ReleaseResponse(resp) -} diff --git a/vendor/github.com/valyala/fasthttp/examples/letsencrypt/letsencryptserver.go b/vendor/github.com/valyala/fasthttp/examples/letsencrypt/letsencryptserver.go deleted file mode 100644 index d08716871aa..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/letsencrypt/letsencryptserver.go +++ /dev/null @@ -1,41 +0,0 @@ -package main - -import ( - "crypto/tls" - "net" - - "github.com/valyala/fasthttp" - "golang.org/x/crypto/acme" - "golang.org/x/crypto/acme/autocert" -) - -func requestHandler(ctx *fasthttp.RequestCtx) { - ctx.SetBodyString("hello from https!") -} - -func main() { - m := &autocert.Manager{ - Prompt: autocert.AcceptTOS, - HostPolicy: autocert.HostWhitelist("example.com"), // Replace with your domain. - Cache: autocert.DirCache("./certs"), - } - - cfg := &tls.Config{ - GetCertificate: m.GetCertificate, - NextProtos: []string{ - "http/1.1", acme.ALPNProto, - }, - } - - // Let's Encrypt tls-alpn-01 only works on port 443. - ln, err := net.Listen("tcp4", "0.0.0.0:443") /* #nosec G102 */ - if err != nil { - panic(err) - } - - lnTls := tls.NewListener(ln, cfg) - - if err := fasthttp.Serve(lnTls, requestHandler); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/valyala/fasthttp/examples/multidomain/Makefile b/vendor/github.com/valyala/fasthttp/examples/multidomain/Makefile deleted file mode 100644 index 0787e5050f0..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/multidomain/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -writer: clean - go get -u github.com/valyala/fasthttp - go build - -clean: - rm -f multidomain diff --git a/vendor/github.com/valyala/fasthttp/examples/multidomain/README.md b/vendor/github.com/valyala/fasthttp/examples/multidomain/README.md deleted file mode 100644 index 12c09ecadbe..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/multidomain/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Multidomain using SSL certs example - -* Prints two messages depending on visited host. 
- -# How to build - -``` -make -``` - -# How to run - -``` -./multidomain -``` diff --git a/vendor/github.com/valyala/fasthttp/examples/multidomain/multidomain.go b/vendor/github.com/valyala/fasthttp/examples/multidomain/multidomain.go deleted file mode 100644 index 9b5ca15debe..00000000000 --- a/vendor/github.com/valyala/fasthttp/examples/multidomain/multidomain.go +++ /dev/null @@ -1,63 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/valyala/fasthttp" -) - -var domains = make(map[string]fasthttp.RequestHandler) - -func main() { - server := &fasthttp.Server{ - // You can check the access using openssl command: - // $ openssl s_client -connect localhost:8080 << EOF - // > GET / - // > Host: localhost - // > EOF - // - // $ openssl s_client -connect localhost:8080 << EOF - // > GET / - // > Host: 127.0.0.1:8080 - // > EOF - // - Handler: func(ctx *fasthttp.RequestCtx) { - h, ok := domains[string(ctx.Host())] - if !ok { - ctx.NotFound() - return - } - h(ctx) - }, - } - - // preparing first host - cert, priv, err := fasthttp.GenerateTestCertificate("localhost:8080") - if err != nil { - panic(err) - } - domains["localhost:8080"] = func(ctx *fasthttp.RequestCtx) { - ctx.WriteString("You are accessing to localhost:8080\n") - } - - err = server.AppendCertEmbed(cert, priv) - if err != nil { - panic(err) - } - - // preparing second host - cert, priv, err = fasthttp.GenerateTestCertificate("127.0.0.1") - if err != nil { - panic(err) - } - domains["127.0.0.1:8080"] = func(ctx *fasthttp.RequestCtx) { - ctx.WriteString("You are accessing to 127.0.0.1:8080\n") - } - - err = server.AppendCertEmbed(cert, priv) - if err != nil { - panic(err) - } - - fmt.Println(server.ListenAndServeTLS(":8080", "", "")) -} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_test.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_test.go deleted file mode 100644 index 698285d756b..00000000000 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package fasthttputil - -import ( - "bytes" - "context" - "fmt" - "io" - "net" - "net/http" - "sync" - "testing" - "time" -) - -func TestInmemoryListener(t *testing.T) { - ln := NewInmemoryListener() - - ch := make(chan struct{}) - for i := 0; i < 10; i++ { - go func(n int) { - conn, err := ln.Dial() - if err != nil { - t.Errorf("unexpected error: %v", err) - } - defer conn.Close() - req := fmt.Sprintf("request_%d", n) - nn, err := conn.Write([]byte(req)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if nn != len(req) { - t.Errorf("unexpected number of bytes written: %d. Expecting %d", nn, len(req)) - } - buf := make([]byte, 30) - nn, err = conn.Read(buf) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - buf = buf[:nn] - resp := fmt.Sprintf("response_%d", n) - if nn != len(resp) { - t.Errorf("unexpected number of bytes read: %d. Expecting %d", nn, len(resp)) - } - if string(buf) != resp { - t.Errorf("unexpected response %q. Expecting %q", buf, resp) - } - ch <- struct{}{} - }(i) - } - - serverCh := make(chan struct{}) - go func() { - for { - conn, err := ln.Accept() - if err != nil { - close(serverCh) - return - } - defer conn.Close() - buf := make([]byte, 30) - n, err := conn.Read(buf) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - buf = buf[:n] - if !bytes.HasPrefix(buf, []byte("request_")) { - t.Errorf("unexpected request prefix %q. 
Expecting %q", buf, "request_") - } - resp := fmt.Sprintf("response_%s", buf[len("request_"):]) - n, err = conn.Write([]byte(resp)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if n != len(resp) { - t.Errorf("unexpected number of bytes written: %d. Expecting %d", n, len(resp)) - } - } - }() - - for i := 0; i < 10; i++ { - select { - case <-ch: - case <-time.After(time.Second): - t.Fatalf("timeout") - } - } - - if err := ln.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - select { - case <-serverCh: - case <-time.After(time.Second): - t.Fatalf("timeout") - } -} - -// echoServerHandler implements http.Handler. -type echoServerHandler struct { - t *testing.T -} - -func (s *echoServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - time.Sleep(time.Millisecond * 100) - if _, err := io.Copy(w, r.Body); err != nil { - s.t.Fatalf("unexpected error: %v", err) - } -} - -func testInmemoryListenerHTTP(t *testing.T, f func(t *testing.T, client *http.Client)) { - ln := NewInmemoryListener() - defer ln.Close() - - client := &http.Client{ - Transport: &http.Transport{ - DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - return ln.Dial() - }, - }, - Timeout: time.Second, - } - - server := &http.Server{ - Handler: &echoServerHandler{t}, - } - - go func() { - if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { - t.Errorf("unexpected error: %v", err) - } - }() - - f(t, client) - - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) - defer cancel() - server.Shutdown(ctx) //nolint:errcheck -} - -func testInmemoryListenerHTTPSingle(t *testing.T, client *http.Client, content string) { - res, err := client.Post("http://...", "text/plain", bytes.NewBufferString(content)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - defer func() { _ = res.Body.Close() }() - b, err := io.ReadAll(res.Body) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - s := string(b) - if string(b) != content { - t.Fatalf("unexpected response %q, expecting %q", s, content) - } -} - -func TestInmemoryListenerHTTPSingle(t *testing.T) { - testInmemoryListenerHTTP(t, func(t *testing.T, client *http.Client) { - testInmemoryListenerHTTPSingle(t, client, "request") - }) -} - -func TestInmemoryListenerHTTPSerial(t *testing.T) { - testInmemoryListenerHTTP(t, func(t *testing.T, client *http.Client) { - for i := 0; i < 10; i++ { - testInmemoryListenerHTTPSingle(t, client, fmt.Sprintf("request_%d", i)) - } - }) -} - -func TestInmemoryListenerHTTPConcurrent(t *testing.T) { - testInmemoryListenerHTTP(t, func(t *testing.T, client *http.Client) { - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - testInmemoryListenerHTTPSingle(t, client, fmt.Sprintf("request_%d", i)) - }(i) - } - wg.Wait() - }) -} - -func acceptLoop(ln net.Listener) { - for { - conn, err := ln.Accept() - if err != nil { - panic(err) - } - - conn.Close() - } -} - -func TestInmemoryListenerAddrDefault(t *testing.T) { - ln := NewInmemoryListener() - - verifyAddr(t, ln.Addr(), inmemoryAddr(0)) - - go func() { - c, err := ln.Dial() - if err != nil { - panic(err) - } - - c.Close() - }() - - lc, err := ln.Accept() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - verifyAddr(t, lc.LocalAddr(), inmemoryAddr(0)) - verifyAddr(t, lc.RemoteAddr(), pipeAddr(0)) - - go acceptLoop(ln) - - c, err := ln.Dial() - if err != nil { - t.Fatalf("unexpected error: %v", 
err) - } - - verifyAddr(t, c.LocalAddr(), pipeAddr(0)) - verifyAddr(t, c.RemoteAddr(), inmemoryAddr(0)) -} - -func verifyAddr(t *testing.T, got, expected net.Addr) { - if got != expected { - t.Fatalf("unexpected addr: %v. Expecting %v", got, expected) - } -} - -func TestInmemoryListenerAddrCustom(t *testing.T) { - ln := NewInmemoryListener() - - listenerAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 12345} - - ln.SetLocalAddr(listenerAddr) - - verifyAddr(t, ln.Addr(), listenerAddr) - - go func() { - c, err := ln.Dial() - if err != nil { - panic(err) - } - - c.Close() - }() - - lc, err := ln.Accept() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - verifyAddr(t, lc.LocalAddr(), listenerAddr) - verifyAddr(t, lc.RemoteAddr(), pipeAddr(0)) - - go acceptLoop(ln) - - clientAddr := &net.TCPAddr{IP: net.IPv4(127, 0, 0, 2), Port: 65432} - - c, err := ln.DialWithLocalAddr(clientAddr) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - verifyAddr(t, c.LocalAddr(), clientAddr) - verifyAddr(t, c.RemoteAddr(), listenerAddr) -} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_timing_test.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_timing_test.go deleted file mode 100644 index ee398009ac2..00000000000 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener_timing_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package fasthttputil_test - -import ( - "crypto/tls" - "net" - "testing" - - "github.com/valyala/fasthttp" - "github.com/valyala/fasthttp/fasthttputil" -) - -var ( - certblock = []byte(`-----BEGIN CERTIFICATE----- -MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV -BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV -MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D -K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te -+z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij -L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 -xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY -6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG -SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 -L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 -45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li -K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 -X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI -whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd ------END CERTIFICATE----- -`) - keyblock = []byte(`-----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG -3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U -wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 -FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf -IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg -GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF -sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 -sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D -uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb -K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 -YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ -DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk -B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV 
-Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x -IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY -wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj -wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D -FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m -tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX -fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU -ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk -K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT -6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt -9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN -Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV -c257YgaWmjK9uB0Y2r2VxS0G ------END PRIVATE KEY-----`) -) - -// BenchmarkPlainStreaming measures end-to-end plaintext streaming performance -// for fasthttp client and server. -// -// It issues http requests over a small number of keep-alive connections. -func BenchmarkPlainStreaming(b *testing.B) { - benchmark(b, streamingHandler, false) -} - -// BenchmarkPlainHandshake measures end-to-end plaintext handshake performance -// for fasthttp client and server. -// -// It re-establishes new connection per each http request. -func BenchmarkPlainHandshake(b *testing.B) { - benchmark(b, handshakeHandler, false) -} - -// BenchmarkTLSStreaming measures end-to-end TLS streaming performance -// for fasthttp client and server. -// -// It issues http requests over a small number of TLS keep-alive connections. -func BenchmarkTLSStreaming(b *testing.B) { - benchmark(b, streamingHandler, true) -} - -func benchmark(b *testing.B, h fasthttp.RequestHandler, isTLS bool) { - var serverTLSConfig, clientTLSConfig *tls.Config - if isTLS { - cert, err := tls.X509KeyPair(certblock, keyblock) - if err != nil { - b.Fatalf("cannot load TLS certificate: %v", err) - } - serverTLSConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - PreferServerCipherSuites: true, - } - serverTLSConfig.CurvePreferences = []tls.CurveID{} - clientTLSConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } - ln := fasthttputil.NewInmemoryListener() - serverStopCh := make(chan struct{}) - go func() { - serverLn := net.Listener(ln) - if serverTLSConfig != nil { - serverLn = tls.NewListener(serverLn, serverTLSConfig) - } - if err := fasthttp.Serve(serverLn, h); err != nil { - b.Errorf("unexpected error in server: %v", err) - } - close(serverStopCh) - }() - c := &fasthttp.HostClient{ - Dial: func(addr string) (net.Conn, error) { - return ln.Dial() - }, - IsTLS: isTLS, - TLSConfig: clientTLSConfig, - } - - b.RunParallel(func(pb *testing.PB) { - runRequests(b, pb, c, isTLS) - }) - ln.Close() - <-serverStopCh -} - -func streamingHandler(ctx *fasthttp.RequestCtx) { - ctx.WriteString("foobar") //nolint:errcheck -} - -func handshakeHandler(ctx *fasthttp.RequestCtx) { - streamingHandler(ctx) - - // Explicitly close connection after each response. - ctx.SetConnectionClose() -} - -func runRequests(b *testing.B, pb *testing.PB, c *fasthttp.HostClient, isTLS bool) { - var req fasthttp.Request - if isTLS { - req.SetRequestURI("https://foo.bar/baz") - } else { - req.SetRequestURI("http://foo.bar/baz") - } - var resp fasthttp.Response - for pb.Next() { - if err := c.Do(&req, &resp); err != nil { - b.Fatalf("unexpected error: %v", err) - } - if resp.StatusCode() != fasthttp.StatusOK { - b.Fatalf("unexpected status code: %d. 
Expecting %d", resp.StatusCode(), fasthttp.StatusOK) - } - } -} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns_test.go b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns_test.go deleted file mode 100644 index 9ac7ee14c73..00000000000 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns_test.go +++ /dev/null @@ -1,407 +0,0 @@ -package fasthttputil - -import ( - "bytes" - "fmt" - "io" - "net" - "testing" - "time" -) - -func TestPipeConnsWriteTimeout(t *testing.T) { - t.Parallel() - - pc := NewPipeConns() - c1 := pc.Conn1() - - deadline := time.Now().Add(time.Millisecond) - if err := c1.SetWriteDeadline(deadline); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - data := []byte("foobar") - for { - _, err := c1.Write(data) - if err != nil { - if err == ErrTimeout { - break - } - t.Fatalf("unexpected error: %v", err) - } - } - - for i := 0; i < 10; i++ { - _, err := c1.Write(data) - if err == nil { - t.Fatalf("expecting error") - } - if err != ErrTimeout { - t.Fatalf("unexpected error: %v. Expecting %v", err, ErrTimeout) - } - } - - // read the written data - c2 := pc.Conn2() - if err := c2.SetReadDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { - t.Fatalf("unexpected error: %v", err) - } - for { - _, err := c2.Read(data) - if err != nil { - if err == ErrTimeout { - break - } - t.Fatalf("unexpected error: %v", err) - } - } - - for i := 0; i < 10; i++ { - _, err := c2.Read(data) - if err == nil { - t.Fatalf("expecting error") - } - if err != ErrTimeout { - t.Fatalf("unexpected error: %v. Expecting %v", err, ErrTimeout) - } - } -} - -func TestPipeConnsPositiveReadTimeout(t *testing.T) { - t.Parallel() - - testPipeConnsReadTimeout(t, time.Millisecond) -} - -func TestPipeConnsNegativeReadTimeout(t *testing.T) { - t.Parallel() - - testPipeConnsReadTimeout(t, -time.Second) -} - -var zeroTime time.Time - -func testPipeConnsReadTimeout(t *testing.T, timeout time.Duration) { - pc := NewPipeConns() - c1 := pc.Conn1() - - deadline := time.Now().Add(timeout) - if err := c1.SetReadDeadline(deadline); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var buf [1]byte - for i := 0; i < 10; i++ { - _, err := c1.Read(buf[:]) - if err == nil { - t.Fatalf("expecting error on iteration %d", i) - } - if err != ErrTimeout { - t.Fatalf("unexpected error on iteration %d: %v. Expecting %v", i, err, ErrTimeout) - } - } - - // disable deadline and send data from c2 to c1 - if err := c1.SetReadDeadline(zeroTime); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - data := []byte("foobar") - c2 := pc.Conn2() - if _, err := c2.Write(data); err != nil { - t.Fatalf("unexpected error: %v", err) - } - dataBuf := make([]byte, len(data)) - if _, err := io.ReadFull(c1, dataBuf); err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !bytes.Equal(data, dataBuf) { - t.Fatalf("unexpected data received: %q. 
Expecting %q", dataBuf, data) - } -} - -func TestPipeConnsCloseWhileReadWriteConcurrent(t *testing.T) { - t.Parallel() - - concurrency := 4 - ch := make(chan struct{}, concurrency) - for i := 0; i < concurrency; i++ { - go func() { - testPipeConnsCloseWhileReadWriteSerial(t) - ch <- struct{}{} - }() - } - - for i := 0; i < concurrency; i++ { - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - } -} - -func TestPipeConnsCloseWhileReadWriteSerial(t *testing.T) { - t.Parallel() - - testPipeConnsCloseWhileReadWriteSerial(t) -} - -func testPipeConnsCloseWhileReadWriteSerial(t *testing.T) { - for i := 0; i < 10; i++ { - testPipeConnsCloseWhileReadWrite(t) - } -} - -func testPipeConnsCloseWhileReadWrite(t *testing.T) { - pc := NewPipeConns() - c1 := pc.Conn1() - c2 := pc.Conn2() - - readCh := make(chan error) - go func() { - var err error - if _, err = io.Copy(io.Discard, c1); err != nil { - if err != errConnectionClosed { - err = fmt.Errorf("unexpected error: %w", err) - } else { - err = nil - } - } - readCh <- err - }() - - writeCh := make(chan error) - go func() { - var err error - for { - if _, err = c2.Write([]byte("foobar")); err != nil { - if err != errConnectionClosed { - err = fmt.Errorf("unexpected error: %w", err) - } else { - err = nil - } - break - } - } - writeCh <- err - }() - - time.Sleep(10 * time.Millisecond) - if err := c1.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - if err := c2.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - select { - case err := <-readCh: - if err != nil { - t.Fatalf("unexpected error in reader: %v", err) - } - case <-time.After(time.Second): - t.Fatalf("timeout") - } - select { - case err := <-writeCh: - if err != nil { - t.Fatalf("unexpected error in writer: %v", err) - } - case <-time.After(time.Second): - t.Fatalf("timeout") - } -} - -func TestPipeConnsReadWriteSerial(t *testing.T) { - t.Parallel() - - testPipeConnsReadWriteSerial(t) -} - -func TestPipeConnsReadWriteConcurrent(t *testing.T) { - t.Parallel() - - testConcurrency(t, 10, testPipeConnsReadWriteSerial) -} - -func testPipeConnsReadWriteSerial(t *testing.T) { - pc := NewPipeConns() - testPipeConnsReadWrite(t, pc.Conn1(), pc.Conn2()) - - pc = NewPipeConns() - testPipeConnsReadWrite(t, pc.Conn2(), pc.Conn1()) -} - -func testPipeConnsReadWrite(t *testing.T, c1, c2 net.Conn) { - defer c1.Close() - defer c2.Close() - - var buf [32]byte - for i := 0; i < 10; i++ { - // The first write - s1 := fmt.Sprintf("foo_%d", i) - n, err := c1.Write([]byte(s1)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if n != len(s1) { - t.Fatalf("unexpected number of bytes written: %d. Expecting %d", n, len(s1)) - } - - // The second write - s2 := fmt.Sprintf("bar_%d", i) - n, err = c1.Write([]byte(s2)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if n != len(s2) { - t.Fatalf("unexpected number of bytes written: %d. Expecting %d", n, len(s2)) - } - - // Read data written above in two writes - s := s1 + s2 - n, err = c2.Read(buf[:]) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if n != len(s) { - t.Fatalf("unexpected number of bytes read: %d. Expecting %d", n, len(s)) - } - if string(buf[:n]) != s { - t.Fatalf("unexpected string read: %q. 
Expecting %q", buf[:n], s) - } - } -} - -func TestPipeConnsCloseSerial(t *testing.T) { - t.Parallel() - - testPipeConnsCloseSerial(t) -} - -func TestPipeConnsCloseConcurrent(t *testing.T) { - t.Parallel() - - testConcurrency(t, 10, testPipeConnsCloseSerial) -} - -func testPipeConnsCloseSerial(t *testing.T) { - pc := NewPipeConns() - testPipeConnsClose(t, pc.Conn1(), pc.Conn2()) - - pc = NewPipeConns() - testPipeConnsClose(t, pc.Conn2(), pc.Conn1()) -} - -func testPipeConnsClose(t *testing.T, c1, c2 net.Conn) { - if err := c1.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - var buf [10]byte - - // attempt writing to closed conn - for i := 0; i < 10; i++ { - n, err := c1.Write(buf[:]) - if err == nil { - t.Fatalf("expecting error") - } - if n != 0 { - t.Fatalf("unexpected number of bytes written: %d. Expecting 0", n) - } - } - - // attempt reading from closed conn - for i := 0; i < 10; i++ { - n, err := c2.Read(buf[:]) - if err == nil { - t.Fatalf("expecting error") - } - if err != io.EOF { - t.Fatalf("unexpected error: %v. Expecting %v", err, io.EOF) - } - if n != 0 { - t.Fatalf("unexpected number of bytes read: %d. Expecting 0", n) - } - } - - if err := c2.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // attempt closing already closed conns - for i := 0; i < 10; i++ { - if err := c1.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - if err := c2.Close(); err != nil { - t.Fatalf("unexpected error: %v", err) - } - } -} - -func testConcurrency(t *testing.T, concurrency int, f func(*testing.T)) { - ch := make(chan struct{}, concurrency) - for i := 0; i < concurrency; i++ { - go func() { - f(t) - ch <- struct{}{} - }() - } - - for i := 0; i < concurrency; i++ { - select { - case <-ch: - case <-time.After(time.Second): - t.Fatalf("timeout") - } - } -} - -func TestPipeConnsAddrDefault(t *testing.T) { - t.Parallel() - - pc := NewPipeConns() - c1 := pc.Conn1() - - if c1.LocalAddr() != pipeAddr(0) { - t.Fatalf("unexpected local address: %v", c1.LocalAddr()) - } - - if c1.RemoteAddr() != pipeAddr(0) { - t.Fatalf("unexpected remote address: %v", c1.RemoteAddr()) - } -} - -func TestPipeConnsAddrCustom(t *testing.T) { - t.Parallel() - - pc := NewPipeConns() - - addr1 := &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4), Port: 1234} - addr2 := &net.TCPAddr{IP: net.IPv4(5, 6, 7, 8), Port: 5678} - addr3 := &net.TCPAddr{IP: net.IPv4(9, 10, 11, 12), Port: 9012} - addr4 := &net.TCPAddr{IP: net.IPv4(13, 14, 15, 16), Port: 3456} - - pc.SetAddresses(addr1, addr2, addr3, addr4) - - c1 := pc.Conn1() - - if c1.LocalAddr() != addr1 { - t.Fatalf("unexpected local address: %v", c1.LocalAddr()) - } - - if c1.RemoteAddr() != addr2 { - t.Fatalf("unexpected remote address: %v", c1.RemoteAddr()) - } - - c2 := pc.Conn1() - - if c2.LocalAddr() != addr1 { - t.Fatalf("unexpected local address: %v", c2.LocalAddr()) - } - - if c2.RemoteAddr() != addr2 { - t.Fatalf("unexpected remote address: %v", c2.RemoteAddr()) - } -} diff --git a/vendor/github.com/valyala/fasthttp/testdata/test.png b/vendor/github.com/valyala/fasthttp/testdata/test.png deleted file mode 100644 index 8b137891791..00000000000 --- a/vendor/github.com/valyala/fasthttp/testdata/test.png +++ /dev/null @@ -1 +0,0 @@ - diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974d23..4ed2e488b61 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include 
#include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a97d9c..877a62b479a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -502,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -657,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1339,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1627,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1653,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -2169,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2403,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2896,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2918,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3051,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3071,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3260,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c1b0..e4bc0bd57c7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca436004fa..689317afdbf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1534..14270508b04 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746ea19..4740b834854 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4605,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5703,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -6001,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8a7b..6f7d2ac70a9 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName 
*uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035ddfa9..9f73df75b5f 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -401,6 +401,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -3486,6 +3487,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 365f558be99..be5698960af 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -102,11 +102,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -116,7 +116,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/telemetry github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.5.0 +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/internal github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid/publisher @@ -131,7 +131,7 @@ github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sas github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth 
github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils -# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 +# github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin @@ -1595,7 +1595,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.23.0 => golang.org/x/crypto v0.22.0 +# golang.org/x/crypto v0.24.0 => golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b @@ -1627,7 +1627,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.25.0 => golang.org/x/net v0.24.0 +# golang.org/x/net v0.26.0 => golang.org/x/net v0.24.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -1662,7 +1662,7 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.20.0 +# golang.org/x/sys v0.21.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -1672,7 +1672,7 @@ golang.org/x/sys/windows/registry # golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.15.0 => golang.org/x/text v0.14.0 +# golang.org/x/text v0.16.0 => golang.org/x/text v0.14.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding From 2bc0d63e3350384d0f6bfdf3cac9ddde1b660ffc Mon Sep 17 00:00:00 2001 From: Lanting Chiang <49918461+lantingchiang@users.noreply.github.com> Date: Tue, 30 Jul 2024 11:56:20 -0400 Subject: [PATCH 30/37] remove unused leader election parameters of metrics adapter (#5986) Signed-off-by: lanting.chiang Signed-off-by: novoselov --- CHANGELOG.md | 1 + cmd/adapter/main.go | 21 --------------------- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17beb5e715b..0c0c5757651 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - **General**: Introduce new Splunk Scaler ([#5904](https://github.com/kedacore/keda/issues/5904)) - **General**: Provide CloudEvents around the management of ScaledObjects resources ([#3522](https://github.com/kedacore/keda/issues/3522)) - **General**: Remove deprecated Kustomize commonLabels ([#5888](https://github.com/kedacore/keda/pull/5888)) +- **General**: Remove unused leader election parameters in metrics adapter ([#5959](https://github.com/kedacore/keda/issues/5959)) - **General**: Support for Kubernetes v1.30 ([#5828](https://github.com/kedacore/keda/issues/5828)) #### Experimental diff --git a/cmd/adapter/main.go b/cmd/adapter/main.go index a5b47c27ec8..29bfaff9012 100644 --- a/cmd/adapter/main.go +++ b/cmd/adapter/main.go @@ -86,24 +86,6 @@ func (a *Adapter) makeProvider(ctx context.Context) (provider.ExternalMetricsPro return nil, nil, fmt.Errorf("failed to get watch namespace (%s)", err) } - leaseDuration, err := kedautil.ResolveOsEnvDuration("KEDA_METRICS_LEADER_ELECTION_LEASE_DURATION") - if err != nil { - logger.Error(err, "invalid KEDA_METRICS_LEADER_ELECTION_LEASE_DURATION") - return nil, nil, fmt.Errorf("invalid KEDA_METRICS_LEADER_ELECTION_LEASE_DURATION (%s)", err) - } - - renewDeadline, err := kedautil.ResolveOsEnvDuration("KEDA_METRICS_LEADER_ELECTION_RENEW_DEADLINE") - 
if err != nil { - logger.Error(err, "Invalid KEDA_METRICS_LEADER_ELECTION_RENEW_DEADLINE") - return nil, nil, fmt.Errorf("invalid KEDA_METRICS_LEADER_ELECTION_RENEW_DEADLINE (%s)", err) - } - - retryPeriod, err := kedautil.ResolveOsEnvDuration("KEDA_METRICS_LEADER_ELECTION_RETRY_PERIOD") - if err != nil { - logger.Error(err, "Invalid KEDA_METRICS_LEADER_ELECTION_RETRY_PERIOD") - return nil, nil, fmt.Errorf("invalid KEDA_METRICS_LEADER_ELECTION_RETRY_PERIOD (%s)", err) - } - // Get a config to talk to the apiserver cfg := ctrl.GetConfigOrDie() cfg.QPS = adapterClientRequestQPS @@ -121,9 +103,6 @@ func (a *Adapter) makeProvider(ctx context.Context) (provider.ExternalMetricsPro DefaultNamespaces: namespaces, }, PprofBindAddress: profilingAddr, - LeaseDuration: leaseDuration, - RenewDeadline: renewDeadline, - RetryPeriod: retryPeriod, }) if err != nil { logger.Error(err, "failed to setup manager") From 07d89b468ddec9ea62d88daab951b36c4bbae8ee Mon Sep 17 00:00:00 2001 From: cyrilico <19289022+cyrilico@users.noreply.github.com> Date: Tue, 30 Jul 2024 17:47:28 +0100 Subject: [PATCH 31/37] feat: Dynatrace scaler (#5685) * Add first scaler version Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * small refactor for response validation Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Add 'from' property, rename host/token Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Add parsing tests Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * update changelog Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Update CHANGELOG.md Signed-off-by: damas <19289022+cyrilico@users.noreply.github.com> * Update values type to float64 Signed-off-by: damas <19289022+cyrilico@users.noreply.github.com> * Remove unnecessary conversion Signed-off-by: damas <19289022+cyrilico@users.noreply.github.com> * e2e tests Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Jorge Turrado Ferrero Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Update dynatrace_test.go Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Fix bad templating for e2e tests Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Revert unnecessary (?) 
template variable change Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Apply suggestions from code review Signed-off-by: Jorge Turrado Ferrero * Update tests/scalers/dynatrace/dynatrace_test.go Signed-off-by: Jorge Turrado Ferrero * Do not allow token to be passed in scaledobject trigger Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Remove bad secret, tweak dynakube test config Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Rename property in response parsing Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Update tests/scalers/dynatrace/dynatrace_test.go Signed-off-by: Jorge Turrado Ferrero * use new operator secret, update template variable naming Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * forgotten correct variable definition Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * try default value in query for e2e tests Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * fix missing closing parenthesis, bad indenting Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> * Update e2e test to use custom metrics Signed-off-by: Jorge Turrado * Close the body to fix static checks Signed-off-by: Jorge Turrado * use declarative scaler config Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> --------- Signed-off-by: cyrilico <19289022+cyrilico@users.noreply.github.com> Signed-off-by: damas <19289022+cyrilico@users.noreply.github.com> Signed-off-by: Jorge Turrado Ferrero Signed-off-by: Jorge Turrado Co-authored-by: Jorge Turrado Ferrero Co-authored-by: Jorge Turrado Signed-off-by: novoselov --- CHANGELOG.md | 1 + pkg/scalers/dynatrace_scaler.go | 195 ++++++++++++++++++ pkg/scalers/dynatrace_scaler_test.go | 74 +++++++ pkg/scaling/scalers_builder.go | 2 + tests/scalers/dynatrace/dynatrace_test.go | 231 ++++++++++++++++++++++ 5 files changed, 503 insertions(+) create mode 100644 pkg/scalers/dynatrace_scaler.go create mode 100644 pkg/scalers/dynatrace_scaler_test.go create mode 100644 tests/scalers/dynatrace/dynatrace_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c0c5757651..4b646580924 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - **General**: Add --ca-dir flag to KEDA operator to specify directories with CA certificates for scalers to authenticate TLS connections (defaults to /custom/ca) ([#5860](https://github.com/kedacore/keda/issues/5860)) +- **General**: Add Dynatrace Scaler ([#5685](https://github.com/kedacore/keda/pull/5685)) - **General**: Declarative parsing of scaler config ([#5037](https://github.com/kedacore/keda/issues/5037)|[#5797](https://github.com/kedacore/keda/issues/5797)) - **General**: Introduce new Splunk Scaler ([#5904](https://github.com/kedacore/keda/issues/5904)) - **General**: Provide CloudEvents around the management of ScaledObjects resources ([#3522](https://github.com/kedacore/keda/issues/3522)) diff --git a/pkg/scalers/dynatrace_scaler.go b/pkg/scalers/dynatrace_scaler.go new file mode 100644 index 00000000000..76e2099eb3e --- /dev/null +++ b/pkg/scalers/dynatrace_scaler.go @@ -0,0 +1,195 @@ +package scalers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + neturl "net/url" + "strings" + + "github.com/go-logr/logr" + v2 "k8s.io/api/autoscaling/v2" + 
"k8s.io/metrics/pkg/apis/external_metrics" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" + kedautil "github.com/kedacore/keda/v2/pkg/util" +) + +const ( + dynatraceMetricDataPointsAPI = "api/v2/metrics/query" +) + +type dynatraceScaler struct { + metricType v2.MetricTargetType + metadata *dynatraceMetadata + httpClient *http.Client + logger logr.Logger +} + +type dynatraceMetadata struct { + Host string `keda:"name=host, order=triggerMetadata;authParams"` + Token string `keda:"name=token, order=authParams"` + MetricSelector string `keda:"name=metricSelector, order=triggerMetadata"` + FromTimestamp string `keda:"name=from, order=triggerMetadata, default=now-2h, optional"` + Threshold float64 `keda:"name=threshold, order=triggerMetadata"` + ActivationThreshold float64 `keda:"name=activationThreshold, order=triggerMetadata, optional"` + TriggerIndex int +} + +// Model of relevant part of Dynatrace's Metric Data Points API Response +// as per https://docs.dynatrace.com/docs/dynatrace-api/environment-api/metric-v2/get-data-points#definition--MetricData +type dynatraceResponse struct { + Result []struct { + Data []struct { + Values []float64 `json:"values"` + } `json:"data"` + } `json:"result"` +} + +func NewDynatraceScaler(config *scalersconfig.ScalerConfig) (Scaler, error) { + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + + logger := InitializeLogger(config, "dynatrace_scaler") + + meta, err := parseDynatraceMetadata(config) + if err != nil { + return nil, fmt.Errorf("error parsing dynatrace metadata: %w", err) + } + + httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) + + logMsg := fmt.Sprintf("Initializing Dynatrace Scaler (Host: %s)", meta.Host) + + logger.Info(logMsg) + + return &dynatraceScaler{ + metricType: metricType, + metadata: meta, + httpClient: httpClient, + logger: logger}, nil +} + +func parseDynatraceMetadata(config *scalersconfig.ScalerConfig) (*dynatraceMetadata, error) { + meta := dynatraceMetadata{} + + meta.TriggerIndex = config.TriggerIndex + if err := config.TypedConfig(&meta); err != nil { + return nil, fmt.Errorf("error parsing dynatrace metadata: %w", err) + } + return &meta, nil +} + +func (s *dynatraceScaler) Close(context.Context) error { + if s.httpClient != nil { + s.httpClient.CloseIdleConnections() + } + return nil +} + +// Validate that response object contains the minimum expected structure +// as per https://docs.dynatrace.com/docs/dynatrace-api/environment-api/metric-v2/get-data-points#definition--MetricData +func validateDynatraceResponse(response *dynatraceResponse) error { + if len(response.Result) == 0 { + return errors.New("dynatrace response does not contain any results") + } + if len(response.Result[0].Data) == 0 { + return errors.New("dynatrace response does not contain any metric series") + } + if len(response.Result[0].Data[0].Values) == 0 { + return errors.New("dynatrace response does not contain any values for the metric series") + } + return nil +} + +func (s *dynatraceScaler) GetMetricValue(ctx context.Context) (float64, error) { + /* + * Build request + */ + var req *http.Request + var err error + + // Append host information to appropriate API endpoint + // Trailing slashes are removed from provided host information to avoid double slashes in the URL + dynatraceAPIURL := fmt.Sprintf("%s/%s", strings.TrimRight(s.metadata.Host, "/"), dynatraceMetricDataPointsAPI) + + // Add query parameters to the URL + url, _ := 
neturl.Parse(dynatraceAPIURL) + queryString := url.Query() + queryString.Set("metricSelector", s.metadata.MetricSelector) + queryString.Set("from", s.metadata.FromTimestamp) + url.RawQuery = queryString.Encode() + + req, err = http.NewRequestWithContext(ctx, "GET", url.String(), nil) + if err != nil { + return 0, err + } + + // Authentication header as per https://docs.dynatrace.com/docs/dynatrace-api/basics/dynatrace-api-authentication#authenticate + req.Header.Add("Authorization", fmt.Sprintf("Api-Token %s", s.metadata.Token)) + + /* + * Execute request + */ + r, err := s.httpClient.Do(req) + if err != nil { + return 0, err + } + defer r.Body.Close() + + if r.StatusCode != http.StatusOK { + msg := fmt.Sprintf("%s: api returned %d", r.Request.URL.Path, r.StatusCode) + return 0, errors.New(msg) + } + + /* + * Parse response + */ + b, err := io.ReadAll(r.Body) + if err != nil { + return 0, err + } + var dynatraceResponse *dynatraceResponse + err = json.Unmarshal(b, &dynatraceResponse) + if err != nil { + return -1, fmt.Errorf("unable to parse Dynatrace Metric Data Points API response: %w", err) + } + + err = validateDynatraceResponse(dynatraceResponse) + if err != nil { + return 0, err + } + + return dynatraceResponse.Result[0].Data[0].Values[0], nil +} + +func (s *dynatraceScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + val, err := s.GetMetricValue(ctx) + + if err != nil { + s.logger.Error(err, "error executing Dynatrace query") + return []external_metrics.ExternalMetricValue{}, false, err + } + + metric := GenerateMetricInMili(metricName, val) + + return []external_metrics.ExternalMetricValue{metric}, val > s.metadata.ActivationThreshold, nil +} + +func (s *dynatraceScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.TriggerIndex, kedautil.NormalizeString("dynatrace")), + }, + Target: GetMetricTargetMili(s.metricType, s.metadata.Threshold), + } + metricSpec := v2.MetricSpec{ + External: externalMetric, Type: externalMetricType, + } + return []v2.MetricSpec{metricSpec} +} diff --git a/pkg/scalers/dynatrace_scaler_test.go b/pkg/scalers/dynatrace_scaler_test.go new file mode 100644 index 00000000000..a29a3de56d6 --- /dev/null +++ b/pkg/scalers/dynatrace_scaler_test.go @@ -0,0 +1,74 @@ +package scalers + +import ( + "context" + "fmt" + "testing" + + "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" +) + +type dynatraceMetadataTestData struct { + metadata map[string]string + authParams map[string]string + errorCase bool +} + +type dynatraceMetricIdentifier struct { + metadataTestData *dynatraceMetadataTestData + triggerIndex int + name string +} + +var testDynatraceMetadata = []dynatraceMetadataTestData{ + {map[string]string{}, map[string]string{}, true}, + // all properly formed + {map[string]string{"threshold": "100", "from": "now-3d", "metricSelector": "MyCustomEvent:filter(eq(\"someProperty\",\"someValue\")):count:splitBy(\"dt.entity.process_group\"):fold"}, map[string]string{"host": "http://dummy:1234", "token": "dummy"}, false}, + // malformed threshold + {map[string]string{"threshold": "abc", "from": "now-3d", "metricSelector": "MyCustomEvent:filter(eq(\"someProperty\",\"someValue\")):count:splitBy(\"dt.entity.process_group\"):fold"}, map[string]string{"host": "http://dummy:1234", "token": "dummy"}, true}, + // malformed activationThreshold + 
{map[string]string{"activationThreshold": "abc", "threshold": "100", "from": "now-3d", "metricSelector": "MyCustomEvent:filter(eq(\"someProperty\",\"someValue\")):count:splitBy(\"dt.entity.process_group\"):fold"}, map[string]string{"host": "http://dummy:1234", "token": "dummy"}, true}, + // missing threshold + {map[string]string{"metricSelector": "MyCustomEvent:filter(eq(\"someProperty\",\"someValue\")):count:splitBy(\"dt.entity.process_group\"):fold"}, map[string]string{"host": "http://dummy:1234", "token": "dummy"}, true}, + // missing metricsSelector + {map[string]string{"threshold": "100"}, map[string]string{"host": "http://dummy:1234", "token": "dummy"}, true}, + // missing token (must come from auth params) + {map[string]string{"token": "foo", "threshold": "100", "from": "now-3d", "metricSelector": "MyCustomEvent:filter(eq(\"someProperty\",\"someValue\")):count:splitBy(\"dt.entity.process_group\"):fold"}, map[string]string{"host": "http://dummy:1234"}, true}, +} + +var dynatraceMetricIdentifiers = []dynatraceMetricIdentifier{ + {&testDynatraceMetadata[1], 0, "s0-dynatrace"}, + {&testDynatraceMetadata[1], 1, "s1-dynatrace"}, +} + +func TestDynatraceParseMetadata(t *testing.T) { + for _, testData := range testDynatraceMetadata { + _, err := parseDynatraceMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + if err != nil && !testData.errorCase { + fmt.Printf("X: %s", testData.metadata) + t.Error("Expected success but got error", err) + } + if testData.errorCase && err == nil { + fmt.Printf("X: %s", testData.metadata) + t.Error("Expected error but got success") + } + } +} +func TestDynatraceGetMetricSpecForScaling(t *testing.T) { + for _, testData := range dynatraceMetricIdentifiers { + meta, err := parseDynatraceMetadata(&scalersconfig.ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, TriggerIndex: testData.triggerIndex}) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + mockNewRelicScaler := dynatraceScaler{ + metadata: meta, + httpClient: nil, + } + + metricSpec := mockNewRelicScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + if metricName != testData.name { + t.Error("Wrong External metric source name:", metricName) + } + } +} diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 702521f7719..1f4549c7ffa 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -162,6 +162,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, return scalers.NewCronScaler(config) case "datadog": return scalers.NewDatadogScaler(ctx, config) + case "dynatrace": + return scalers.NewDynatraceScaler(config) case "elasticsearch": return scalers.NewElasticsearchScaler(config) case "etcd": diff --git a/tests/scalers/dynatrace/dynatrace_test.go b/tests/scalers/dynatrace/dynatrace_test.go new file mode 100644 index 00000000000..7d20231a91a --- /dev/null +++ b/tests/scalers/dynatrace/dynatrace_test.go @@ -0,0 +1,231 @@ +//go:build e2e +// +build e2e + +package dynatrace_test + +import ( + "bytes" + "encoding/base64" + "fmt" + "net/http" + "os" + "testing" + "time" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "dynatrace-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + secretName = fmt.Sprintf("%s-secret", testName) + triggerAuthName = fmt.Sprintf("%s-ta", testName) + dynatraceHost = os.Getenv("DYNATRACE_HOST") + dynatraceToken = os.Getenv("DYNATRACE_METRICS_TOKEN") + dynatraceInjestHost = fmt.Sprintf("%s/api/v2/metrics/ingest", dynatraceHost) + dynatraceMetricName = fmt.Sprintf("metric-%d", GetRandomNumber()) + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + TriggerAuthName string + SecretName string + DynatraceToken string + DynatraceHost string + MinReplicaCount string + MaxReplicaCount string + MetricName string +} + +const ( + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +data: + apiToken: {{.DynatraceToken}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: token + name: {{.SecretName}} + key: apiToken +` + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: prom-test-app + image: tbickford/simple-web-app-prometheus:a13ade9 + imagePullPolicy: IfNotPresent +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + pollingInterval: 1 + cooldownPeriod: 1 + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 10 + triggers: + - type: dynatrace + metadata: + host: {{.DynatraceHost}} + threshold: "2" + activationThreshold: "3" + metricSelector: "{{.MetricName}}:max" + from: now-2m + authenticationRef: + name: {{.TriggerAuthName}} +` +) + +func TestDynatraceScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + require.NotEmpty(t, dynatraceToken, "DYNATRACE_METRICS_TOKEN env variable is required for dynatrace tests") + require.NotEmpty(t, dynatraceHost, "DYNATRACE_HOST env variable is required for dynatrace tests") + + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + t.Cleanup(func() { + DeleteKubernetesResources(t, testNamespace, data, templates) + }) + + // Create kubernetes resources + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 1), + "replica count should be %s after a minute", minReplicaCount) + + // test scaling + testActivation(t, kc) + testScaleOut(t, kc) + testScaleIn(t, kc) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing activation ---") + stopCh := make(chan struct{}) + go setMetricValue(t, 1, 
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing activation ---")
+	stopCh := make(chan struct{})
+	go setMetricValue(t, 1, stopCh)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 120)
+	close(stopCh)
+}
+
+func testScaleOut(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale out ---")
+	stopCh := make(chan struct{})
+	go setMetricValue(t, 10, stopCh)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", maxReplicaCount)
+	close(stopCh)
+}
+
+func testScaleIn(t *testing.T, kc *kubernetes.Clientset) {
+	t.Log("--- testing scale in ---")
+
+	stopCh := make(chan struct{})
+	go setMetricValue(t, 0, stopCh)
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+		"replica count should be %d after 3 minutes", minReplicaCount)
+	close(stopCh)
+}
+
+func setMetricValue(t *testing.T, value float64, stopCh <-chan struct{}) {
+	metric := fmt.Sprintf("%s %f", dynatraceMetricName, value)
+	for {
+		select {
+		case <-stopCh:
+			return
+		default:
+			time.Sleep(time.Second)
+			req, err := http.NewRequest("POST", dynatraceIngestURL, bytes.NewBufferString(metric))
+			if err != nil {
+				t.Log("Invalid ingest request")
+				continue
+			}
+			req.Header.Add("Content-Type", "text/plain")
+			req.Header.Add("Authorization", fmt.Sprintf("Api-Token %s", dynatraceToken))
+			r, err := http.DefaultClient.Do(req)
+			if err != nil {
+				t.Log("Error executing request")
+				continue
+			}
+			if r.StatusCode != http.StatusAccepted {
+				msg := fmt.Sprintf("%s: api returned %d", r.Request.URL.Path, r.StatusCode)
+				t.Log(msg)
+			}
+			r.Body.Close()
+		}
+	}
+}
+
+func getTemplateData() (templateData, []Template) {
+	return templateData{
+		TestNamespace:    testNamespace,
+		DeploymentName:   deploymentName,
+		TriggerAuthName:  triggerAuthName,
+		ScaledObjectName: scaledObjectName,
+		SecretName:       secretName,
+		MinReplicaCount:  fmt.Sprintf("%v", minReplicaCount),
+		MaxReplicaCount:  fmt.Sprintf("%v", maxReplicaCount),
+		DynatraceToken:   base64.StdEncoding.EncodeToString([]byte(dynatraceToken)),
+		DynatraceHost:    dynatraceHost,
+		MetricName:       dynatraceMetricName,
+	}, []Template{
+		{Name: "secretTemplate", Config: secretTemplate},
+		{Name: "triggerAuthenticationTemplate", Config: triggerAuthenticationTemplate},
+		{Name: "deploymentTemplate", Config: deploymentTemplate},
+		{Name: "scaledObjectTemplate", Config: scaledObjectTemplate},
+	}
+}

From 4eb4eeddc39f9e882db2af5316a9911646afb960 Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Tue, 30 Jul 2024 22:25:08 +0200
Subject: [PATCH 32/37] chore: Disable temporarily AzPipeline WI e2e (#6003)

Signed-off-by: Jorge Turrado
Signed-off-by: novoselov
---
 .../azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go
index f1db3b68871..026e4f74944 100644
--- a/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go
+++ b/tests/scalers/azure/azure_pipelines_aad_wi/azure_pipelines_aad_wi_test.go
@@ -165,7 +165,8 @@ spec:
 `
 )
 
-func TestScaler(t *testing.T) {
+// TODO: Enable the test again when the infra is fixed
+func DisabledTestScaler(t *testing.T) {
 	// setup
 	t.Log("--- setting up ---")
 	require.NotEmpty(t, organizationURL, "AZURE_DEVOPS_ORGANIZATION_URL env variable is required for azure pipelines test")
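An aside on why the rename in patch 32 is enough to disable the test: `go test` only treats functions whose names match the `TestXxx` pattern (where `Xxx` does not begin with a lowercase letter) as tests, so prefixing the name parks the test without deleting any code. A minimal, hypothetical illustration (not part of the patch; any `_test.go` file would behave the same):

package example

import "testing"

// Picked up by `go test`: the name matches the required TestXxx pattern.
func TestSomething(t *testing.T) {}

// Ignored by `go test`: with the Disabled prefix the name no longer starts
// with "Test", mirroring the DisabledTestScaler rename above.
func DisabledTestSomething(t *testing.T) {}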

From 71785c137a35b37de1a940e0c30f324dadf96b9b Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Wed, 31 Jul 2024 20:15:10 +0200
Subject: [PATCH 33/37] chore: Prepare main branch for v2.14.1 (#6007)

Signed-off-by: Jorge Turrado
Signed-off-by: novoselov
---
 .github/ISSUE_TEMPLATE/3_bug_report.yml |  1 +
 CHANGELOG.md                            | 20 ++++++++++++--------
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/3_bug_report.yml b/.github/ISSUE_TEMPLATE/3_bug_report.yml
index ec7a7f40a70..52f270d2e32 100644
--- a/.github/ISSUE_TEMPLATE/3_bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/3_bug_report.yml
@@ -57,6 +57,7 @@ body:
       label: KEDA Version
       description: What version of KEDA that are you running?
       options:
+        - "2.14.1"
         - "2.14.0"
         - "2.13.1"
         - "2.13.0"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4b646580924..5df58c495d9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -52,10 +52,6 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
 
 ## Unreleased
 
-- **General**: Fix CVE-2024-24790, CVE-2024-24789, and CVE-2024-24791 in stdlib.
-- **General**: Fix CVE-2024-35255 in github.com/Azure/azure-sdk-for-go/sdk/azidentity
-- **General**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp
-
 ### Breaking Changes
 
@@ -81,12 +77,10 @@ Here is an overview of all new **experimental** features:
 ### Improvements
 
 - **General**: Added `eagerScalingStrategy` for `ScaledJob` ([#5114](https://github.com/kedacore/keda/issues/5114))
-- **General**: Do not delete running Jobs on KEDA restart ([#5656](https://github.com/kedacore/keda/issues/5656))
 - **Azure queue scaler**: Added new configuration option 'queueLengthStrategy' ([#4478](https://github.com/kedacore/keda/issues/4478))
 - **Cassandra Scaler**: Add TLS support for cassandra scaler ([#5802](https://github.com/kedacore/keda/issues/5802))
 - **GCP Pub/Sub**: Add optional valueIfNull to allow a default scaling value and prevent errors when GCP metric returns no value. ([#5896](https://github.com/kedacore/keda/issues/5896))
 - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778))
-- **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738))
 - **IBM MQ Scaler**: Add TLS support for IBM MQ scaler ([#5974](https://github.com/kedacore/keda/issues/5974))
 - **Kafka**: Fix logic to scale to zero on invalid offset even with earliest offsetResetPolicy ([#5689](https://github.com/kedacore/keda/issues/5689))
 - **MYSQL Scaler**: Add support to fetch username from env ([#5883](https://github.com/kedacore/keda/issues/5883))
@@ -96,9 +90,7 @@ Here is an overview of all new **experimental** features:
 
 - **General**: Check for missing CRD references and sample CRs ([#5920](https://github.com/kedacore/keda/issues/5920))
 - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
-- **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760))
 - **New Relic Scaler**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5944](https://github.com/kedacore/keda/issues/5944))
-- **ScaledJob**: Fix ScaledJob ignores failing trigger(s) error ([#5922](https://github.com/kedacore/keda/issues/5922))
 
 ### Deprecations
 
@@ -115,6 +107,18 @@ New deprecation(s):
 - **Azure Services**: Migrated to `github.com/Azure/azure-sdk-for-go` ([#5470](https://github.com/kedacore/keda/issues/5470))
 - **IBM MQ Scaler**: Adding e2e test ([#1287](https://github.com/kedacore/keda/issues/1287))
 
+## v2.14.1
+
+### Fixes
+
+- **General**: Do not delete running Jobs on KEDA restart ([#5656](https://github.com/kedacore/keda/issues/5656))
+- **General**: Fix CVE-2024-24790, CVE-2024-24789, and CVE-2024-24791 in stdlib. ([#5971](https://github.com/kedacore/keda/pull/5971))
+- **General**: Fix CVE-2024-35255 in github.com/Azure/azure-sdk-for-go/sdk/azidentity ([#5971](https://github.com/kedacore/keda/pull/5971))
+- **General**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5971](https://github.com/kedacore/keda/pull/5971))
+- **General**: Fix ScaledJob ignores failing trigger(s) error ([#5922](https://github.com/kedacore/keda/issues/5922))
+- **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
+- **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738))
+- **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760))
+
 ## v2.14.0
 
 ### New

From 18ed5156dff085eb09d1b4084ed3e8a62d0bafd7 Mon Sep 17 00:00:00 2001
From: Dao Thanh Tung
Date: Wed, 31 Jul 2024 21:34:42 +0100
Subject: [PATCH 34/37] Remove deprecated cortexOrgId in prometheus scaler
 (#5990)

* Remove deprecated cortexOrgId in prometheus scaler

Signed-off-by: dttung2905

* Move to breaking changes

Signed-off-by: dttung2905

---------

Signed-off-by: dttung2905
Signed-off-by: novoselov
---
 CHANGELOG.md                          |  6 ++++--
 pkg/scalers/prometheus_scaler.go      |  4 ----
 pkg/scalers/prometheus_scaler_test.go | 10 ++++------
 3 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5df58c495d9..39a1dca9031 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -94,12 +94,14 @@ Here is an overview of all new **experimental** features:
 
 ### Deprecations
 
-You can find all deprecations in [this overview](https://github.com/kedacore/keda/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abreaking-change) and [join the discussion here](https://github.com/kedacore/keda/discussions/categories/deprecations).
-
 New deprecation(s):
 
 - TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))
 
+### Breaking Changes
+
+- **Prometheus Scaler**: Remove deprecated field `cortexOrgID` from Prometheus scaler ([#5538](https://github.com/kedacore/keda/issues/5538))
+
 ### Other
 
 - **General**: Added Pre Regex check before building image in e2e test ([#5783](https://github.com/kedacore/keda/issues/5783))
diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go
index 99fc6f9f5ea..5a0516f42a0 100644
--- a/pkg/scalers/prometheus_scaler.go
+++ b/pkg/scalers/prometheus_scaler.go
@@ -32,7 +32,6 @@ const (
 	promThreshold           = "threshold"
 	promActivationThreshold = "activationThreshold"
 	promNamespace           = "namespace"
-	promCortexScopeOrgID    = "cortexOrgID"
 	promCustomHeaders       = "customHeaders"
 	ignoreNullValues        = "ignoreNullValues"
 	unsafeSsl               = "unsafeSsl"
@@ -62,9 +61,6 @@ type prometheusMetadata struct {
 	CustomHeaders    map[string]string `keda:"name=customHeaders, order=triggerMetadata, optional"`
 	IgnoreNullValues bool              `keda:"name=ignoreNullValues, order=triggerMetadata, optional, default=true"`
 	UnsafeSSL        bool              `keda:"name=unsafeSsl, order=triggerMetadata, optional"`
-
-	// deprecated
-	CortexOrgID string `keda:"name=cortexOrgID, order=triggerMetadata, optional, deprecated=use customHeaders instead"`
 }
 
 type promQueryResult struct {
diff --git a/pkg/scalers/prometheus_scaler_test.go b/pkg/scalers/prometheus_scaler_test.go
index 694a6a7c624..b6eb46cfd0c 100644
--- a/pkg/scalers/prometheus_scaler_test.go
+++ b/pkg/scalers/prometheus_scaler_test.go
@@ -60,8 +60,6 @@ var testPromMetadata = []parsePrometheusMetadataTestData{
 	{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "customHeaders": "key1=value1,key2=value2"}, false},
 	// customHeaders with wrong format
 	{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "customHeaders": "key1=value1,key2"}, true},
-	// deprecated cortexOrgID
-	{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "cortexOrgID": "my-org"}, true},
 	// queryParameters
 	{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "queryParameters": "key1=value1,key2=value2"}, false},
 	// queryParameters with wrong format
@@ -174,7 +172,7 @@ func TestPrometheusScalerAuthParams(t *testing.T) {
 	}
 }
 
-type prometheusQromQueryResultTestData struct {
+type prometheusPromQueryResultTestData struct {
 	name           string
 	bodyStr        string
 	responseStatus int
@@ -184,7 +182,7 @@ type prometheusQromQueryResultTestData struct {
 	unsafeSsl bool
 }
 
-var testPromQueryResult = []prometheusQromQueryResultTestData{
+var testPromQueryResult = []prometheusPromQueryResultTestData{
 	{
 		name:           "no results",
 		bodyStr:        `{}`,
@@ -339,7 +337,7 @@ func TestPrometheusScalerExecutePromQuery(t *testing.T) {
 }
 
 func TestPrometheusScalerCustomHeaders(t *testing.T) {
-	testData := prometheusQromQueryResultTestData{
+	testData := prometheusPromQueryResultTestData{
 		name:           "no values",
 		bodyStr:        `{"data":{"result":[]}}`,
 		responseStatus: http.StatusOK,
@@ -379,7 +377,7 @@ func TestPrometheusScalerExecutePromQueryParameters(t *testing.T) {
-	testData := prometheusQromQueryResultTestData{
+	testData := prometheusPromQueryResultTestData{
 		name:           "no values",
 		bodyStr:        `{"data":{"result":[]}}`,
 		responseStatus: http.StatusOK,

From 409d9a9ce81e2eff79c42aa83e95b9722724df89 Mon Sep 17 00:00:00 2001
From: Max Cao
Date: Wed, 31 Jul 2024 16:27:41 -0700
Subject: [PATCH 35/37] fix: reassign err to prevent invalid return in
 KedaProvider `GetExternalMetric` (#6010)

Signed-off-by: Max Cao
Signed-off-by: novoselov
---
 CHANGELOG.md             | 1 +
 pkg/provider/provider.go | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 39a1dca9031..b101296a375 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -89,6 +89,7 @@ Here is an overview of all new **experimental** features:
 ### Fixes
 
 - **General**: Check for missing CRD references and sample CRs ([#5920](https://github.com/kedacore/keda/issues/5920))
+- **General**: Fix panic in `KedaProvider` when getting metrics from Metrics Service if the gRPC Server connection is not established ([#6009](https://github.com/kedacore/keda/issues/6009))
 - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
 - **New Relic Scaler**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5944](https://github.com/kedacore/keda/issues/5944))
 
diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go
index 5a32360fc8d..d5c5e3c6593 100644
--- a/pkg/provider/provider.go
+++ b/pkg/provider/provider.go
@@ -86,7 +86,8 @@ func (p *KedaProvider) GetExternalMetric(ctx context.Context, namespace string,
 	// Get Metrics from Metrics Service gRPC Server
 	if !p.grpcClient.WaitForConnectionReady(ctx, logger) {
 		grpcClientConnected = false
-		logger.Error(fmt.Errorf("timeout while waiting to establish gRPC connection to KEDA Metrics Service server"), "timeout", "server", p.grpcClient.GetServerURL())
+		err := fmt.Errorf("timeout while waiting to establish gRPC connection to KEDA Metrics Service server")
+		logger.Error(err, "timeout", "server", p.grpcClient.GetServerURL())
 		return nil, err
 	}
 	if !grpcClientConnected {

From 32ee3a3e7d2ad0f71c79d572f8c6e02da3efdf42 Mon Sep 17 00:00:00 2001
From: novoselov
Date: Thu, 1 Aug 2024 16:22:43 +0500
Subject: [PATCH 36/37] fix

Signed-off-by: novoselov
---
 pkg/scalers/selenium_grid_scaler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go
index 3e035047edc..e1c6963ae7c 100644
--- a/pkg/scalers/selenium_grid_scaler.go
+++ b/pkg/scalers/selenium_grid_scaler.go
@@ -153,7 +153,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2.Metri
 
 func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) {
 	body, err := json.Marshal(map[string]string{
-		"query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }",
+		"query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId, platformName } } }",
 	})
 
 	if err != nil {

From eea7dcda67c97dddf6011aedb1f9760ab3e12f27 Mon Sep 17 00:00:00 2001
From: novoselov
Date: Thu, 1 Aug 2024 16:42:49 +0500
Subject: [PATCH 37/37]

Signed-off-by: Doofus100500
Signed-off-by: novoselov
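
To close the loop on the selenium-grid thread that runs through patches 01, 02 and 36: with `platformName` restored to the GraphQL query, the counting rule the scaler applies to queued requests reduces to the sketch below. It is a simplified, hypothetical reduction for illustration only: `countMatchingRequests`, the trimmed `capability` struct and the local `defaultBrowserVersion` constant are stand-ins rather than the scaler's API, and the real logic lives in `getCountFromSeleniumResponse` in pkg/scalers/selenium_grid_scaler.go.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// capability mirrors the fields the scaler reads from Selenium Grid queue
// entries (same JSON tags as the scaler's capability type).
type capability struct {
	BrowserName    string `json:"browserName"`
	BrowserVersion string `json:"browserVersion"`
	PlatformName   string `json:"platformName"`
}

// countMatchingRequests condenses the queue-counting branch after patch 02:
// a queued request is counted only when the browser name matches exactly,
// the platform matches case-insensitively (strings.EqualFold), and the
// browser version either matches by prefix or is blank while the trigger
// asks for the default version.
func countMatchingRequests(rawRequests []string, browserName, browserVersion, platformName string) int64 {
	const defaultBrowserVersion = "latest" // assumed stand-in for the scaler's DefaultBrowserVersion
	var count int64
	for _, raw := range rawRequests {
		var c capability
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			continue
		}
		if c.BrowserName != browserName || !strings.EqualFold(c.PlatformName, platformName) {
			continue
		}
		if strings.HasPrefix(c.BrowserVersion, browserVersion) {
			count++
		} else if strings.TrimSpace(c.BrowserVersion) == "" && browserVersion == defaultBrowserVersion {
			count++
		}
	}
	return count
}

func main() {
	queue := []string{
		`{"browserName":"chrome","browserVersion":"latest","platformName":"Linux"}`,
		`{"browserName":"chrome","browserVersion":"latest","platformName":"Windows 11"}`,
		`{"browserName":"firefox","browserVersion":"latest","platformName":"Linux"}`,
	}
	// Only the first entry matches a chrome/linux trigger: EqualFold accepts
	// "Linux" vs "linux", while the Windows and firefox requests are skipped.
	fmt.Println(countMatchingRequests(queue, "chrome", "latest", "linux")) // prints 1
}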