-
Notifications
You must be signed in to change notification settings - Fork 629
/
api_op_CreateLocationHdfs.go
206 lines (179 loc) · 6.94 KB
/
api_op_CreateLocationHdfs.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
// Code generated by smithy-go-codegen DO NOT EDIT.
package datasync
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/datasync/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// CreateLocationHdfs creates a transfer location for a Hadoop Distributed File
// System (HDFS). DataSync can use this location as a source or destination for
// transferring data. Before you begin, make sure that you understand how
// DataSync accesses HDFS
// clusters (https://docs.aws.amazon.com/datasync/latest/userguide/create-hdfs-location.html#accessing-hdfs)
// .
func (c *Client) CreateLocationHdfs(ctx context.Context, params *CreateLocationHdfsInput, optFns ...func(*Options)) (*CreateLocationHdfsOutput, error) {
	// Substitute an empty input so the middleware chain never observes nil params.
	input := params
	if input == nil {
		input = &CreateLocationHdfsInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateLocationHdfs", input, optFns, c.addOperationCreateLocationHdfsMiddlewares)
	if err != nil {
		return nil, err
	}

	// invokeOperation returns the deserialized output; attach the operation
	// metadata before handing it back to the caller.
	output := result.(*CreateLocationHdfsOutput)
	output.ResultMetadata = metadata
	return output, nil
}
// CreateLocationHdfsInput holds the request parameters for the
// CreateLocationHdfs operation.
type CreateLocationHdfsInput struct {
// The Amazon Resource Names (ARNs) of the agents that are used to connect to the
// HDFS cluster.
//
// This member is required.
AgentArns []string
// The type of authentication used to determine the identity of the user.
//
// This member is required.
AuthenticationType types.HdfsAuthenticationType
// The NameNode that manages the HDFS namespace. The NameNode performs operations
// such as opening, closing, and renaming files and directories. The NameNode
// contains the information to map blocks of data to the DataNodes. You can use
// only one NameNode.
//
// This member is required.
NameNodes []types.HdfsNameNode
// The size of data blocks to write into the HDFS cluster. The block size must be
// a multiple of 512 bytes. The default block size is 128 mebibytes (MiB).
BlockSize *int32
// The Kerberos key table (keytab) that contains mappings between the defined
// Kerberos principal and the encrypted keys. You can load the keytab from a file
// by providing the file's address. If you're using the CLI, it performs base64
// encoding for you. Otherwise, provide the base64-encoded text. If KERBEROS is
// specified for AuthenticationType , this parameter is required.
KerberosKeytab []byte
// The krb5.conf file that contains the Kerberos configuration information. You
// can load the krb5.conf file by providing the file's address. If you're using
// the CLI, it performs the base64 encoding for you. Otherwise, provide the
// base64-encoded text. If KERBEROS is specified for AuthenticationType , this
// parameter is required.
KerberosKrb5Conf []byte
// The Kerberos principal with access to the files and folders on the HDFS
// cluster. If KERBEROS is specified for AuthenticationType , this parameter is
// required.
KerberosPrincipal *string
// The URI of the HDFS cluster's Key Management Server (KMS).
KmsKeyProviderUri *string
// The Quality of Protection (QOP) configuration specifies the Remote Procedure
// Call (RPC) and data transfer protection settings configured on the Hadoop
// Distributed File System (HDFS) cluster. If QopConfiguration isn't specified,
// RpcProtection and DataTransferProtection default to PRIVACY . If you set
// RpcProtection or DataTransferProtection , the other parameter assumes the same
// value.
QopConfiguration *types.QopConfiguration
// The number of DataNodes to replicate the data to when writing to the HDFS
// cluster. By default, data is replicated to three DataNodes.
ReplicationFactor *int32
// The user name used to identify the client on the host operating system. If
// SIMPLE is specified for AuthenticationType , this parameter is required.
SimpleUser *string
// A subdirectory in the HDFS cluster. This subdirectory is used to read data from
// or write data to the HDFS cluster. If the subdirectory isn't specified, it will
// default to / .
Subdirectory *string
// The key-value pair that represents the tag that you want to add to the
// location. The value can be an empty string. We recommend using tags to name your
// resources.
Tags []types.TagListEntry
noSmithyDocumentSerde
}
// CreateLocationHdfsOutput holds the response data for the CreateLocationHdfs
// operation.
type CreateLocationHdfsOutput struct {
// The ARN of the source HDFS cluster location that's created.
LocationArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
// addOperationCreateLocationHdfsMiddlewares wires the full middleware stack
// (serialization, endpoint resolution, signing, retries, validation, logging,
// and response handling) for the CreateLocationHdfs operation onto stack.
// Registration order matters: each middleware is inserted relative to the
// stack step it belongs to, so the sequence below mirrors the request
// lifecycle and must not be reordered casually.
func (c *Client) addOperationCreateLocationHdfsMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateLocationHdfs{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateLocationHdfs{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "CreateLocationHdfs"); err != nil {
		// Wrap with %w (not %v) so callers can inspect the cause via
		// errors.Is / errors.As instead of losing the error chain.
		return fmt.Errorf("add protocol finalizers: %w", err)
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addClientRequestID(stack); err != nil {
		return err
	}
	if err = addComputeContentLength(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addComputePayloadSHA256(stack); err != nil {
		return err
	}
	if err = addRetry(stack, options); err != nil {
		return err
	}
	if err = addRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = addRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	// Input validation (required members such as AgentArns, AuthenticationType,
	// and NameNodes) runs before the request is serialized.
	if err = addOpCreateLocationHdfsValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateLocationHdfs(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
// newServiceMetadataMiddleware_opCreateLocationHdfs builds the middleware that
// records service metadata (region, service ID, operation name) for the
// CreateLocationHdfs operation.
func newServiceMetadataMiddleware_opCreateLocationHdfs(region string) *awsmiddleware.RegisterServiceMetadata {
	metadata := awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		OperationName: "CreateLocationHdfs",
	}
	return &metadata
}