diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQuery.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQuery.java index af5ced9d4230..aa516c31fb54 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQuery.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQuery.java @@ -662,4 +662,12 @@ Page> listTableData(TableId tableId, TableDataListOption... opt * @throws BigQueryException upon failure */ QueryResponse getQueryResults(JobId job, QueryResultsOption... options) throws BigQueryException; + + /** + * Returns a channel to write data to be inserted into a BigQuery table. Data format and other + * options can be configured using the {@link LoadConfiguration} parameter. + * + * @throws BigQueryException upon failure + */ + TableDataWriteChannel writer(LoadConfiguration loadConfiguration); } diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQueryImpl.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQueryImpl.java index 9bc89206889b..3a1cc658bef3 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQueryImpl.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/BigQueryImpl.java @@ -619,6 +619,10 @@ private static QueryResult.Builder transformQueryResults(JobId jobId, List optionMap(Option... options) { Map optionMap = Maps.newEnumMap(BigQueryRpc.Option.class); for (Option option : options) { @@ -698,8 +702,7 @@ public TableId apply(TableId tableId) { if (job instanceof LoadJobInfo) { LoadJobInfo loadJob = (LoadJobInfo) job; LoadJobInfo.Builder loadBuilder = loadJob.toBuilder(); - loadBuilder.destinationTable(setProjectId(loadJob.destinationTable())); - return loadBuilder.build(); + return loadBuilder.configuration(setProjectId(loadJob.configuration())).build(); } return job; } @@ -711,4 +714,10 @@ private QueryRequest setProjectId(QueryRequest request) { } return builder.build(); } + + private LoadConfiguration setProjectId(LoadConfiguration configuration) { + LoadConfiguration.Builder builder = configuration.toBuilder(); + builder.destinationTable(setProjectId(configuration.destinationTable())); + return builder.build(); + } } diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/CopyJobInfo.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/CopyJobInfo.java index a3247b78d5b8..bd346a8e1633 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/CopyJobInfo.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/CopyJobInfo.java @@ -95,8 +95,8 @@ public Builder destinationTable(TableId destinationTable) { /** * Sets whether the job is allowed to create new tables. * - * @see - * Jobs: Link Configuration + * @see + * Create Disposition */ public Builder createDisposition(CreateDisposition createDisposition) { this.createDisposition = createDisposition; @@ -106,8 +106,8 @@ public Builder createDisposition(CreateDisposition createDisposition) { /** * Sets the action that should occur if the destination table already exists. * - * @see - * Jobs: Link Configuration + * @see + * Write Disposition */ public Builder writeDisposition(WriteDisposition writeDisposition) { this.writeDisposition = writeDisposition; @@ -145,8 +145,8 @@ public TableId destinationTable() { /** * Returns whether the job is allowed to create new tables. 
* - * @see - * Jobs: Copy Configuration + * @see + * Create Disposition */ public CreateDisposition createDisposition() { return this.createDisposition; } @@ -155,8 +155,8 @@ public CreateDisposition createDisposition() { /** * Returns the action that should occur if the destination table already exists. * - * @see - * Jobs: Copy Configuration + * @see + * Write Disposition */ public WriteDisposition writeDisposition() { return writeDisposition; diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadConfiguration.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadConfiguration.java new file mode 100644 index 000000000000..a0436bd68f02 --- /dev/null +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadConfiguration.java @@ -0,0 +1,380 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.bigquery; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.bigquery.model.JobConfigurationLoad; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.gcloud.bigquery.JobInfo.CreateDisposition; +import com.google.gcloud.bigquery.JobInfo.WriteDisposition; + +import java.io.Serializable; +import java.util.List; +import java.util.Objects; + +/** + * Google BigQuery Configuration for a load operation. A load configuration can be used to build a + * {@link LoadJobInfo} or to load data into a table with a {@link com.google.gcloud.WriteChannel} + * ({@link BigQuery#writer(LoadConfiguration)}).
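+ *
+ * <p>A hypothetical usage sketch (an initialized {@code BigQuery} service named {@code bigquery}
+ * and the dataset, table and field names are assumed), streaming JSON data through a write
+ * channel:
+ * <pre> {@code
+ * LoadConfiguration configuration =
+ *     LoadConfiguration.builder(TableId.of("dataset", "table"), FormatOptions.json()).build();
+ * try (WriteChannel channel = bigquery.writer(configuration)) {
+ *   channel.write(ByteBuffer.wrap("{\"field\": \"value\"}".getBytes(StandardCharsets.UTF_8)));
+ * } catch (IOException e) {
+ *   // the load could not be submitted
+ * }
+ * }</pre>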
+ */ +public class LoadConfiguration implements Serializable { + + private static final long serialVersionUID = 470267591917413578L; + + private final TableId destinationTable; + private final CreateDisposition createDisposition; + private final WriteDisposition writeDisposition; + private final FormatOptions formatOptions; + private final Integer maxBadRecords; + private final Schema schema; + private final Boolean ignoreUnknownValues; + private final List projectionFields; + + public static final class Builder { + + private TableId destinationTable; + private CreateDisposition createDisposition; + private WriteDisposition writeDisposition; + private FormatOptions formatOptions; + private Integer maxBadRecords; + private Schema schema; + private Boolean ignoreUnknownValues; + private List projectionFields; + + private Builder() {} + + private Builder(LoadConfiguration loadConfiguration) { + this.destinationTable = loadConfiguration.destinationTable; + this.createDisposition = loadConfiguration.createDisposition; + this.writeDisposition = loadConfiguration.writeDisposition; + this.formatOptions = loadConfiguration.formatOptions; + this.maxBadRecords = loadConfiguration.maxBadRecords; + this.schema = loadConfiguration.schema; + this.ignoreUnknownValues = loadConfiguration.ignoreUnknownValues; + this.projectionFields = loadConfiguration.projectionFields; + } + + private Builder(JobConfigurationLoad loadConfigurationPb) { + this.destinationTable = TableId.fromPb(loadConfigurationPb.getDestinationTable()); + if (loadConfigurationPb.getCreateDisposition() != null) { + this.createDisposition = + CreateDisposition.valueOf(loadConfigurationPb.getCreateDisposition()); + } + if (loadConfigurationPb.getWriteDisposition() != null) { + this.writeDisposition = WriteDisposition.valueOf(loadConfigurationPb.getWriteDisposition()); + } + if (loadConfigurationPb.getSourceFormat() != null) { + this.formatOptions = FormatOptions.of(loadConfigurationPb.getSourceFormat()); + } + if (loadConfigurationPb.getAllowJaggedRows() != null + || loadConfigurationPb.getAllowQuotedNewlines() != null + || loadConfigurationPb.getEncoding() != null + || loadConfigurationPb.getFieldDelimiter() != null + || loadConfigurationPb.getQuote() != null + || loadConfigurationPb.getSkipLeadingRows() != null) { + CsvOptions.Builder builder = CsvOptions.builder() + .allowJaggedRows(loadConfigurationPb.getAllowJaggedRows()) + .allowQuotedNewLines(loadConfigurationPb.getAllowQuotedNewlines()) + .encoding(loadConfigurationPb.getEncoding()) + .fieldDelimiter(loadConfigurationPb.getFieldDelimiter()) + .quote(loadConfigurationPb.getQuote()) + .skipLeadingRows(loadConfigurationPb.getSkipLeadingRows()); + this.formatOptions = builder.build(); + } + this.maxBadRecords = loadConfigurationPb.getMaxBadRecords(); + if (loadConfigurationPb.getSchema() != null) { + this.schema = Schema.fromPb(loadConfigurationPb.getSchema()); + } + this.ignoreUnknownValues = loadConfigurationPb.getIgnoreUnknownValues(); + this.projectionFields = loadConfigurationPb.getProjectionFields(); + } + + /** + * Sets the destination table to load the data into. + */ + public Builder destinationTable(TableId destinationTable) { + this.destinationTable = destinationTable; + return this; + } + + /** + * Sets whether the job is allowed to create new tables. 
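+ * For example, {@code CreateDisposition.CREATE_IF_NEEDED} allows the load to create the
+ * destination table if it does not already exist, while {@code CreateDisposition.CREATE_NEVER}
+ * causes the load to fail if the table is missing.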
+ * + * @see + * Create Disposition + */ + public Builder createDisposition(CreateDisposition createDisposition) { + this.createDisposition = createDisposition; + return this; + } + + /** + * Sets the action that should occur if the destination table already exists. + * + * @see + * Write Disposition + */ + public Builder writeDisposition(WriteDisposition writeDisposition) { + this.writeDisposition = writeDisposition; + return this; + } + + /** + * Sets the source format, and possibly some parsing options, of the external data. Supported + * formats are {@code CSV}, {@code NEWLINE_DELIMITED_JSON} and {@code DATASTORE_BACKUP}. If not + * specified, {@code CSV} format is assumed. + * + * + * Source Format + */ + public Builder formatOptions(FormatOptions formatOptions) { + this.formatOptions = formatOptions; + return this; + } + + /** + * Sets the maximum number of bad records that BigQuery can ignore when running the job. If the + * number of bad records exceeds this value, an invalid error is returned in the job result. + * By default no bad record is ignored. + */ + public Builder maxBadRecords(Integer maxBadRecords) { + this.maxBadRecords = maxBadRecords; + return this; + } + + /** + * Sets the schema for the destination table. The schema can be omitted if the destination table + * already exists, or if you're loading data from Google Cloud Datastore. + */ + public Builder schema(Schema schema) { + this.schema = schema; + return this; + } + + /** + * Sets whether BigQuery should allow extra values that are not represented in the table schema. + * If {@code true}, the extra values are ignored. If {@code false}, records with extra columns + * are treated as bad records, and if there are too many bad records, an invalid error is + * returned in the job result. By default unknown values are not allowed. + */ + public Builder ignoreUnknownValues(Boolean ignoreUnknownValues) { + this.ignoreUnknownValues = ignoreUnknownValues; + return this; + } + + /** + * Sets which entity properties to load into BigQuery from a Cloud Datastore backup. This field + * is only used if the source format is set to {@code DATASTORE_BACKUP}. Property names are case + * sensitive and must be top-level properties. If no properties are specified, BigQuery loads + * all properties. If any named property isn't found in the Cloud Datastore backup, an invalid + * error is returned in the job result. + */ + public Builder projectionFields(List projectionFields) { + this.projectionFields = + projectionFields != null ? ImmutableList.copyOf(projectionFields) : null; + return this; + } + + public LoadConfiguration build() { + return new LoadConfiguration(this); + } + } + + private LoadConfiguration(Builder builder) { + this.destinationTable = checkNotNull(builder.destinationTable); + this.createDisposition = builder.createDisposition; + this.writeDisposition = builder.writeDisposition; + this.formatOptions = builder.formatOptions; + this.maxBadRecords = builder.maxBadRecords; + this.schema = builder.schema; + this.ignoreUnknownValues = builder.ignoreUnknownValues; + this.projectionFields = builder.projectionFields; + } + + /** + * Returns the destination table to load the data into. + */ + public TableId destinationTable() { + return destinationTable; + } + + /** + * Returns whether the job is allowed to create new tables. 
+ * + * @see + * Create Disposition + */ + public CreateDisposition createDisposition() { + return this.createDisposition; + } + + /** + * Returns the action that should occur if the destination table already exists. + * + * @see + * Write Disposition + */ + public WriteDisposition writeDisposition() { + return writeDisposition; + } + + /** + * Returns additional properties used to parse CSV data (used when {@link #format()} is set + * to CSV). Returns {@code null} if not set. + */ + public CsvOptions csvOptions() { + return formatOptions instanceof CsvOptions ? (CsvOptions) formatOptions : null; + } + + /** + * Returns the maximum number of bad records that BigQuery can ignore when running the job. If the + * number of bad records exceeds this value, an invalid error is returned in the job result. + * By default no bad record is ignored. + */ + public Integer maxBadRecords() { + return maxBadRecords; + } + + /** + * Returns the schema for the destination table, if set. Returns {@code null} otherwise. + */ + public Schema schema() { + return schema; + } + + /** + * Returns the format of the data files. + */ + public String format() { + return formatOptions != null ? formatOptions.type() : null; + } + + /** + * Returns whether BigQuery should allow extra values that are not represented in the table + * schema. If {@code true}, the extra values are ignored. If {@code false}, records with extra + * columns are treated as bad records, and if there are too many bad records, an invalid error is + * returned in the job result. By default unknown values are not allowed. + */ + public Boolean ignoreUnknownValues() { + return ignoreUnknownValues; + } + + /** + * Returns which entity properties to load into BigQuery from a Cloud Datastore backup. This field + * is only used if the source format is set to {@code DATASTORE_BACKUP}. Property names are case + * sensitive and must be top-level properties. If no properties are specified, BigQuery loads + * all properties. If any named property isn't found in the Cloud Datastore backup, an invalid + * error is returned in the job result.
+ */ + public List projectionFields() { + return projectionFields; + } + + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("destinationTable", destinationTable) + .add("createDisposition", createDisposition) + .add("writeDisposition", writeDisposition) + .add("formatOptions", formatOptions) + .add("maxBadRecords", maxBadRecords) + .add("schema", schema) + .add("ignoreUnknownValue", ignoreUnknownValues) + .add("projectionFields", projectionFields) + .toString(); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof LoadConfiguration + && Objects.equals(toPb(), ((LoadConfiguration) obj).toPb()); + } + + @Override + public int hashCode() { + return Objects.hash(destinationTable, createDisposition, writeDisposition, formatOptions, + maxBadRecords, schema, ignoreUnknownValues, projectionFields); + } + + JobConfigurationLoad toPb() { + JobConfigurationLoad loadConfigurationPb = new JobConfigurationLoad(); + loadConfigurationPb.setDestinationTable(destinationTable.toPb()); + if (createDisposition != null) { + loadConfigurationPb.setCreateDisposition(createDisposition.toString()); + } + if (writeDisposition != null) { + loadConfigurationPb.setWriteDisposition(writeDisposition.toString()); + } + if (csvOptions() != null) { + CsvOptions csvOptions = csvOptions(); + loadConfigurationPb.setFieldDelimiter(csvOptions.fieldDelimiter()) + .setAllowJaggedRows(csvOptions.allowJaggedRows()) + .setAllowQuotedNewlines(csvOptions.allowQuotedNewLines()) + .setEncoding(csvOptions.encoding()) + .setQuote(csvOptions.quote()) + .setSkipLeadingRows(csvOptions.skipLeadingRows()); + } + if (schema != null) { + loadConfigurationPb.setSchema(schema.toPb()); + } + if (formatOptions != null) { + loadConfigurationPb.setSourceFormat(formatOptions.type()); + } + loadConfigurationPb.setMaxBadRecords(maxBadRecords); + loadConfigurationPb.setIgnoreUnknownValues(ignoreUnknownValues); + loadConfigurationPb.setProjectionFields(projectionFields); + return loadConfigurationPb; + } + + static LoadConfiguration fromPb(JobConfigurationLoad configurationPb) { + return new Builder(configurationPb).build(); + } + + /** + * Creates a builder for a BigQuery Load Configuration given the destination table. + */ + public static Builder builder(TableId destinationTable) { + return new Builder().destinationTable(destinationTable); + } + + /** + * Creates a builder for a BigQuery Load Configuration given the destination table and format. + */ + public static Builder builder(TableId destinationTable, FormatOptions format) { + return new Builder().destinationTable(destinationTable).formatOptions(format); + } + + /** + * Returns a BigQuery Load Configuration for the given destination table. + */ + public static LoadConfiguration of(TableId destinationTable) { + return builder(destinationTable).build(); + } + + /** + * Returns a BigQuery Load Configuration for the given destination table and format. 
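+ * For example, {@code LoadConfiguration.of(TableId.of("dataset", "table"), FormatOptions.csv())}
+ * (dataset and table names assumed) configures a CSV load into {@code dataset.table}.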
+ */ + public static LoadConfiguration of(TableId destinationTable, FormatOptions format) { + return builder(destinationTable).formatOptions(format).build(); + } +} diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadJobInfo.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadJobInfo.java index 1120bbbacf3f..4f8d03cbc6a9 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadJobInfo.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/LoadJobInfo.java @@ -34,81 +34,29 @@ */ public class LoadJobInfo extends JobInfo { - private static final long serialVersionUID = 2515503817007974115L; + private static final long serialVersionUID = 6349304826867750535L; private final List sourceUris; - private final TableId destinationTable; - private final CreateDisposition createDisposition; - private final WriteDisposition writeDisposition; - private final FormatOptions formatOptions; - private final Integer maxBadRecords; - private final Schema schema; - private final Boolean ignoreUnknownValues; - private final List projectionFields; + private final LoadConfiguration configuration; public static final class Builder extends JobInfo.Builder { private List sourceUris; - private TableId destinationTable; - private CreateDisposition createDisposition; - private WriteDisposition writeDisposition; - private FormatOptions formatOptions; - private Integer maxBadRecords; - private Schema schema; - private Boolean ignoreUnknownValues; - private List projectionFields; + private LoadConfiguration configuration; private Builder() {} private Builder(LoadJobInfo jobInfo) { super(jobInfo); this.sourceUris = jobInfo.sourceUris; - this.destinationTable = jobInfo.destinationTable; - this.createDisposition = jobInfo.createDisposition; - this.writeDisposition = jobInfo.writeDisposition; - this.formatOptions = jobInfo.formatOptions; - this.maxBadRecords = jobInfo.maxBadRecords; - this.schema = jobInfo.schema; - this.ignoreUnknownValues = jobInfo.ignoreUnknownValues; - this.projectionFields = jobInfo.projectionFields; + this.configuration = jobInfo.configuration; } private Builder(Job jobPb) { super(jobPb); JobConfigurationLoad loadConfigurationPb = jobPb.getConfiguration().getLoad(); + this.configuration = LoadConfiguration.fromPb(loadConfigurationPb); this.sourceUris = loadConfigurationPb.getSourceUris(); - this.destinationTable = TableId.fromPb(loadConfigurationPb.getDestinationTable()); - if (loadConfigurationPb.getCreateDisposition() != null) { - this.createDisposition = - CreateDisposition.valueOf(loadConfigurationPb.getCreateDisposition()); - } - if (loadConfigurationPb.getWriteDisposition() != null) { - this.writeDisposition = WriteDisposition.valueOf(loadConfigurationPb.getWriteDisposition()); - } - if (loadConfigurationPb.getSourceFormat() != null) { - this.formatOptions = FormatOptions.of(loadConfigurationPb.getSourceFormat()); - } - if (loadConfigurationPb.getAllowJaggedRows() != null - || loadConfigurationPb.getAllowQuotedNewlines() != null - || loadConfigurationPb.getEncoding() != null - || loadConfigurationPb.getFieldDelimiter() != null - || loadConfigurationPb.getQuote() != null - || loadConfigurationPb.getSkipLeadingRows() != null) { - CsvOptions.Builder builder = CsvOptions.builder() - .allowJaggedRows(loadConfigurationPb.getAllowJaggedRows()) - .allowQuotedNewLines(loadConfigurationPb.getAllowQuotedNewlines()) - .encoding(loadConfigurationPb.getEncoding()) - .fieldDelimiter(loadConfigurationPb.getFieldDelimiter()) - 
.quote(loadConfigurationPb.getQuote()) - .skipLeadingRows(loadConfigurationPb.getSkipLeadingRows()); - this.formatOptions = builder.build(); - } - this.maxBadRecords = loadConfigurationPb.getMaxBadRecords(); - if (loadConfigurationPb.getSchema() != null) { - this.schema = Schema.fromPb(loadConfigurationPb.getSchema()); - } - this.ignoreUnknownValues = loadConfigurationPb.getIgnoreUnknownValues(); - this.projectionFields = loadConfigurationPb.getProjectionFields(); } /** @@ -122,88 +70,10 @@ public Builder sourceUris(List sourceUris) { } /** - * Sets the destination table to load the data into. + * Sets the configuration for the BigQuery Load Job. */ - public Builder destinationTable(TableId destinationTable) { - this.destinationTable = destinationTable; - return this; - } - - /** - * Sets whether the job is allowed to create new tables. - * - * @see - * Jobs: Load Configuration - */ - public Builder createDisposition(CreateDisposition createDisposition) { - this.createDisposition = createDisposition; - return this; - } - - /** - * Sets the action that should occur if the destination table already exists. - * - * @see - * Jobs: Load Configuration - */ - public Builder writeDisposition(WriteDisposition writeDisposition) { - this.writeDisposition = writeDisposition; - return this; - } - - /** - * Sets the source format, and possibly some parsing options, of the external data. Supported - * formats are {@code CSV}, {@code NEWLINE_DELIMITED_JSON} and {@code DATASTORE_BACKUP}. If not - * specified, {@code CSV} format is assumed. - * - * - * Source Format - */ - public Builder formatOptions(FormatOptions formatOptions) { - this.formatOptions = formatOptions; - return this; - } - - /** - * Sets the maximum number of bad records that BigQuery can ignore when running the job. If the - * number of bad records exceeds this value, an invalid error is returned in the job result. - * By default no bad record is ignored. - */ - public Builder maxBadRecords(Integer maxBadRecords) { - this.maxBadRecords = maxBadRecords; - return this; - } - - /** - * Sets the schema for the destination table. The schema can be omitted if the destination table - * already exists, or if you're loading data from Google Cloud Datastore. - */ - public Builder schema(Schema schema) { - this.schema = schema; - return this; - } - - /** - * Sets whether BigQuery should allow extra values that are not represented in the table schema. - * If {@code true}, the extra values are ignored. If {@code true}, records with extra columns - * are treated as bad records, and if there are too many bad records, an invalid error is - * returned in the job result. By default unknown values are not allowed. - */ - public Builder ignoreUnknownValues(Boolean ignoreUnknownValues) { - this.ignoreUnknownValues = ignoreUnknownValues; - return this; - } - - /** - * Sets which entity properties to load into BigQuery from a Cloud Datastore backup. This field - * is only used if the source format is set to {@code DATASTORE_BACKUP}. Property names are case - * sensitive and must be top-level properties. If no properties are specified, BigQuery loads - * all properties. If any named property isn't found in the Cloud Datastore backup, an invalid - * error is returned in the job result. - */ - public Builder projectionFields(List projectionFields) { - this.projectionFields = - projectionFields != null ? 
ImmutableList.copyOf(projectionFields) : null; + public Builder configuration(LoadConfiguration configuration) { + this.configuration = configuration; return this; } @@ -216,14 +86,7 @@ public LoadJobInfo build() { private LoadJobInfo(Builder builder) { super(builder); this.sourceUris = builder.sourceUris; - this.destinationTable = checkNotNull(builder.destinationTable); - this.createDisposition = builder.createDisposition; - this.writeDisposition = builder.writeDisposition; - this.formatOptions = builder.formatOptions; - this.maxBadRecords = builder.maxBadRecords; - this.schema = builder.schema; - this.ignoreUnknownValues = builder.ignoreUnknownValues; - this.projectionFields = builder.projectionFields; + this.configuration = builder.configuration; } /** @@ -236,82 +99,10 @@ public List sourceUris() { } /** - * Returns the destination table to load the data into. + * Returns the configuration for the BigQuery Load Job. */ - public TableId destinationTable() { - return destinationTable; - } - - /** - * Returns whether the job is allowed to create new tables. - * - * @see - * Jobs: Load Configuration - */ - public CreateDisposition createDisposition() { - return this.createDisposition; - } - - /** - * Returns the action that should occur if the destination table already exists. - * - * @see - * Jobs: Load Configuration - */ - public WriteDisposition writeDisposition() { - return writeDisposition; - } - - /** - * Returns additional properties used to parse CSV data (used when {@link #format()} is set - * to CSV). Returns {@code null} if not set. - */ - public CsvOptions csvOptions() { - return formatOptions instanceof CsvOptions ? (CsvOptions) formatOptions : null; - } - - /** - * Returns the maximum number of bad records that BigQuery can ignore when running the job. If the - * number of bad records exceeds this value, an invalid error is returned in the job result. - * By default no bad record is ignored. - */ - public Integer maxBadRecords() { - return maxBadRecords; - } - - /** - * Returns the schema for the destination table, if set. Returns {@code null} otherwise. - */ - public Schema schema() { - return schema; - } - - /** - * Returns the format of the data files. - */ - public String format() { - return formatOptions != null ? formatOptions.type() : null; - } - - /** - * Returns whether BigQuery should allow extra values that are not represented in the table - * schema. If {@code true}, the extra values are ignored. If {@code true}, records with extra - * columns are treated as bad records, and if there are too many bad records, an invalid error is - * returned in the job result. By default unknown values are not allowed. - */ - public Boolean ignoreUnknownValues() { - return ignoreUnknownValues; - } - - /** - * Returns which entity properties to load into BigQuery from a Cloud Datastore backup. This field - * is only used if the source format is set to {@code DATASTORE_BACKUP}. Property names are case - * sensitive and must be top-level properties. If no properties are specified, BigQuery loads - * all properties. If any named property isn't found in the Cloud Datastore backup, an invalid - * error is returned in the job result. 
- */ - public List projectionFields() { - return projectionFields; + public LoadConfiguration configuration() { + return configuration; } @Override @@ -321,16 +112,7 @@ public Builder toBuilder() { @Override ToStringHelper toStringHelper() { - return super.toStringHelper() - .add("destinationTable", destinationTable) - .add("sourceUris", sourceUris) - .add("createDisposition", createDisposition) - .add("writeDisposition", writeDisposition) - .add("formatOptions", formatOptions) - .add("maxBadRecords", maxBadRecords) - .add("schema", schema) - .add("ignoreUnknownValue", ignoreUnknownValues) - .add("projectionFields", projectionFields); + return super.toStringHelper().add("sourceUris", sourceUris).add("configuration", configuration); } @Override @@ -340,122 +122,61 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(super.hashCode(), sourceUris, destinationTable, createDisposition, - writeDisposition, formatOptions, maxBadRecords, schema, ignoreUnknownValues, - projectionFields); + return Objects.hash(super.hashCode(), sourceUris, configuration); } @Override Job toPb() { - JobConfigurationLoad loadConfigurationPb = new JobConfigurationLoad(); + JobConfigurationLoad loadConfigurationPb = configuration.toPb(); loadConfigurationPb.setSourceUris(sourceUris); - loadConfigurationPb.setDestinationTable(destinationTable.toPb()); - if (createDisposition != null) { - loadConfigurationPb.setCreateDisposition(createDisposition.toString()); - } - if (writeDisposition != null) { - loadConfigurationPb.setWriteDisposition(writeDisposition.toString()); - } - if (csvOptions() != null) { - CsvOptions csvOptions = csvOptions(); - loadConfigurationPb.setFieldDelimiter(csvOptions.fieldDelimiter()) - .setAllowJaggedRows(csvOptions.allowJaggedRows()) - .setAllowQuotedNewlines(csvOptions.allowQuotedNewLines()) - .setEncoding(csvOptions.encoding()) - .setQuote(csvOptions.quote()) - .setSkipLeadingRows(csvOptions.skipLeadingRows()); - } - if (schema != null) { - loadConfigurationPb.setSchema(schema.toPb()); - } - if (formatOptions != null) { - loadConfigurationPb.setSourceFormat(formatOptions.type()); - } - loadConfigurationPb.setMaxBadRecords(maxBadRecords); - loadConfigurationPb.setIgnoreUnknownValues(ignoreUnknownValues); - loadConfigurationPb.setProjectionFields(projectionFields); return super.toPb().setConfiguration(new JobConfiguration().setLoad(loadConfigurationPb)); } /** - * Creates a builder for a BigQuery Load Job given destination table and source URI. + * Creates a builder for a BigQuery Load Job given the load configuration and source URI. */ - public static Builder builder(TableId destinationTable, String sourceUri) { - return builder(destinationTable, ImmutableList.of(checkNotNull(sourceUri))); + public static Builder builder(LoadConfiguration configuration, String sourceUri) { + return builder(configuration, ImmutableList.of(checkNotNull(sourceUri))); } /** - * Creates a builder for a BigQuery Load Job given destination table and source URIs. + * Creates a builder for a BigQuery Load Job given the load configuration and source URIs. */ - public static Builder builder(TableId destinationTable, List sourceUris) { - return new Builder().destinationTable(destinationTable).sourceUris(sourceUris); + public static Builder builder(LoadConfiguration configuration, List sourceUris) { + return new Builder().configuration(configuration).sourceUris(sourceUris); } /** - * Returns a BigQuery Load Job for the given destination table and source URI. 
Job's id is chosen + * Returns a BigQuery Load Job for the given load configuration and source URI. Job's id is chosen * by the service. */ - public static LoadJobInfo of(TableId destinationTable, String sourceUri) { - return builder(destinationTable, sourceUri).build(); + public static LoadJobInfo of(LoadConfiguration configuration, String sourceUri) { + return builder(configuration, sourceUri).build(); } /** - * Returns a BigQuery Load Job for the given destination table and source URIs. Job's id is chosen - * by the service. - */ - public static LoadJobInfo of(TableId destinationTable, List sourceUris) { - return builder(destinationTable, sourceUris).build(); - } - - /** - * Returns a BigQuery Load Job for the given destination table, format and source URI. Job's id is + * Returns a BigQuery Load Job for the given load configuration and source URIs. Job's id is * chosen by the service. */ - public static LoadJobInfo of(TableId destinationTable, FormatOptions format, String sourceUri) { - return builder(destinationTable, sourceUri).formatOptions(format).build(); - } - - /** - * Returns a BigQuery Load Job for the given destination table, format and source URIs. Job's id - * is chosen by the service. - */ - public static LoadJobInfo of(TableId destinationTable, FormatOptions format, - List sourceUris) { - return builder(destinationTable, sourceUris).formatOptions(format).build(); + public static LoadJobInfo of(LoadConfiguration configuration, List sourceUris) { + return builder(configuration, sourceUris).build(); } /** - * Returns a BigQuery Load Job for the given destination table and source URI. Job's id is set to + * Returns a BigQuery Load Job for the given load configuration and source URI. Job's id is set to * the provided value. */ - public static LoadJobInfo of(JobId jobId, TableId destinationTable, String sourceUri) { - return builder(destinationTable, sourceUri).jobId(jobId).build(); - } - - /** - * Returns a BigQuery Load Job for the given destination table and source URIs. Job's id is set to - * the provided value. - */ - public static LoadJobInfo of(JobId jobId, TableId destinationTable, List sourceUris) { - return builder(destinationTable, sourceUris).jobId(jobId).build(); - } - - /** - * Returns a BigQuery Load Job for the given destination table, format, and source URI. Job's id - * is set to the provided value. - */ - public static LoadJobInfo of(JobId jobId, TableId destinationTable, FormatOptions format, - String sourceUri) { - return builder(destinationTable, sourceUri).formatOptions(format).jobId(jobId).build(); + public static LoadJobInfo of(JobId jobId, LoadConfiguration configuration, String sourceUri) { + return builder(configuration, sourceUri).jobId(jobId).build(); } /** - * Returns a BigQuery Load Job for the given destination table, format and source URIs. Job's id - * is set to the provided value. + * Returns a BigQuery Load Job for the given load configuration and source URIs. Job's id is set + * to the provided value. 
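+ *
+ * <p>For example (project, job, bucket and file names assumed):
+ * <pre> {@code
+ * LoadJobInfo job = LoadJobInfo.of(JobId.of("my-project", "my-job"),
+ *     LoadConfiguration.of(TableId.of("dataset", "table")),
+ *     ImmutableList.of("gs://bucket/file1.csv", "gs://bucket/file2.csv"));
+ * }</pre>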
*/ - public static LoadJobInfo of(JobId jobId, TableId destinationTable, FormatOptions format, + public static LoadJobInfo of(JobId jobId, LoadConfiguration configuration, List sourceUris) { - return builder(destinationTable, sourceUris).formatOptions(format).jobId(jobId).build(); + return builder(configuration, sourceUris).jobId(jobId).build(); } @SuppressWarnings("unchecked") diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/QueryJobInfo.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/QueryJobInfo.java index 5a8b822e87ef..e11e8d6aa8ad 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/QueryJobInfo.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/QueryJobInfo.java @@ -197,8 +197,8 @@ public Builder userDefinedFunctions(List userDefinedFunctio /** * Sets whether the job is allowed to create tables. * - * @see - * Jobs: Query Configuration + * @see + * Create Disposition */ public Builder createDisposition(CreateDisposition createDisposition) { this.createDisposition = createDisposition; @@ -208,8 +208,8 @@ public Builder createDisposition(CreateDisposition createDisposition) { /** * Sets the action that should occur if the destination table already exists. * - * @see - * Jobs: Query Configuration + * @see + * Write Disposition */ public Builder writeDisposition(WriteDisposition writeDisposition) { this.writeDisposition = writeDisposition; @@ -319,8 +319,8 @@ public Boolean allowLargeResults() { /** * Returns whether the job is allowed to create new tables. * - * @see - * Jobs: Query Configuration + * @see + * Create Disposition */ public CreateDisposition createDisposition() { return createDisposition; @@ -399,8 +399,8 @@ public List userDefinedFunctions() { /** * Returns the action that should occur if the destination table already exists. * - * @see - * Jobs: Query Configuration + * @see + * Write Disposition */ public WriteDisposition writeDisposition() { return writeDisposition; diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/Table.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/Table.java index 1662f266b5ec..b4cc1df1d997 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/Table.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/Table.java @@ -262,8 +262,8 @@ Job load(FormatOptions format, String sourceUri, BigQuery.JobOption... options) */ Job load(FormatOptions format, List sourceUris, BigQuery.JobOption... options) throws BigQueryException { - return new Job(bigquery, bigquery.create(LoadJobInfo.of(info.tableId(), format, sourceUris), - options)); + LoadConfiguration configuration = LoadConfiguration.of(info.tableId(), format); + return new Job(bigquery, bigquery.create(LoadJobInfo.of(configuration, sourceUris), options)); } /** diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/TableDataWriteChannel.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/TableDataWriteChannel.java new file mode 100644 index 000000000000..34a0c62c805b --- /dev/null +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/bigquery/TableDataWriteChannel.java @@ -0,0 +1,109 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud.bigquery; + +import static com.google.gcloud.RetryHelper.runWithRetries; +import static java.util.concurrent.Executors.callable; + +import com.google.gcloud.BaseWriteChannel; +import com.google.gcloud.RestorableState; +import com.google.gcloud.RetryHelper; +import com.google.gcloud.WriteChannel; + +import java.util.Arrays; + +/** + * WriteChannel implementation to stream data into a BigQuery table. + */ +class TableDataWriteChannel extends BaseWriteChannel<BigQueryOptions, LoadConfiguration> { + + TableDataWriteChannel(BigQueryOptions options, LoadConfiguration loadConfiguration) { + this(options, loadConfiguration, options.rpc().open(loadConfiguration.toPb())); + } + + TableDataWriteChannel(BigQueryOptions options, LoadConfiguration config, String uploadId) { + super(options, config, uploadId); + } + + @Override + protected void write(final int length, final boolean last) { + try { + runWithRetries(callable(new Runnable() { + @Override + public void run() { + options.rpc().write(uploadId, buffer, 0, position, length, last); + } + }), options.retryParams(), BigQueryImpl.EXCEPTION_HANDLER); + } catch (RetryHelper.RetryHelperException e) { + throw BigQueryException.translateAndThrow(e); + } + } + + @Override + public RestorableState<WriteChannel> capture() { + byte[] bufferToSave = null; + if (isOpen) { + flush(); + bufferToSave = Arrays.copyOf(buffer, limit); + } + return StateImpl.builder(options, entity, uploadId) + .position(position) + .buffer(bufferToSave) + .isOpen(isOpen) + .chunkSize(chunkSize) + .build(); + } + + static class StateImpl extends BaseWriteChannel.BaseState<BigQueryOptions, LoadConfiguration> { + + private static final long serialVersionUID = -787362105981823738L; + + StateImpl(Builder builder) { + super(builder); + } + + static class Builder + extends BaseWriteChannel.BaseState.Builder<BigQueryOptions, LoadConfiguration> { + + private Builder(BigQueryOptions options, LoadConfiguration configuration, String uploadId) { + super(options, configuration, uploadId); + } + + public RestorableState<WriteChannel> build() { + return new StateImpl(this); + } + } + + static Builder builder(BigQueryOptions options, LoadConfiguration configuration, String uploadId) { + return new Builder(options, configuration, uploadId); + } + + @Override + public WriteChannel restore() { + TableDataWriteChannel channel = + new TableDataWriteChannel(serviceOptions, entity, uploadId); + if (buffer != null) { + channel.buffer = buffer.clone(); + channel.limit = buffer.length; + } + channel.position = position; + channel.isOpen = isOpen; + channel.chunkSize = chunkSize; + return channel; + } + } +} diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/BigQueryRpc.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/BigQueryRpc.java index d53ad838b802..5f17f60f2bb5 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/BigQueryRpc.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/BigQueryRpc.java @@ -19,6 +19,7 @@ import com.google.api.services.bigquery.model.Dataset; import com.google.api.services.bigquery.model.GetQueryResultsResponse; import com.google.api.services.bigquery.model.Job; +import
com.google.api.services.bigquery.model.JobConfigurationLoad; import com.google.api.services.bigquery.model.QueryRequest; import com.google.api.services.bigquery.model.QueryResponse; import com.google.api.services.bigquery.model.Table; @@ -185,4 +186,26 @@ GetQueryResultsResponse getQueryResults(String jobId, Map options) throws BigQueryException; QueryResponse query(QueryRequest request) throws BigQueryException; + + /** + * Opens a resumable upload session to load data into a BigQuery table and returns an upload URI. + * + * @param configuration load configuration + * @throws BigQueryException upon failure + */ + String open(JobConfigurationLoad configuration) throws BigQueryException; + + /** + * Uploads the provided data to the resumable upload session at the specified position. + * + * @param uploadId the resumable upload session URI + * @param toWrite a byte array of data to upload + * @param toWriteOffset offset in the {@code toWrite} param to start writing from + * @param destOffset offset in the destination where to upload data to + * @param length the number of bytes to upload + * @param last {@code true} indicates that the last chunk is being uploaded + * @throws BigQueryException upon failure + */ + void write(String uploadId, byte[] toWrite, int toWriteOffset, long destOffset, int length, + boolean last) throws BigQueryException; } diff --git a/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/DefaultBigQueryRpc.java b/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/DefaultBigQueryRpc.java index 04e481b345c2..74fdeb74bd64 100644 --- a/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/DefaultBigQueryRpc.java +++ b/gcloud-java-bigquery/src/main/java/com/google/gcloud/spi/DefaultBigQueryRpc.java @@ -20,12 +20,22 @@ import static com.google.gcloud.spi.BigQueryRpc.Option.PAGE_TOKEN; import static com.google.gcloud.spi.BigQueryRpc.Option.START_INDEX; import static com.google.gcloud.spi.BigQueryRpc.Option.TIMEOUT; +import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.net.HttpURLConnection.HTTP_OK; import com.google.api.client.googleapis.json.GoogleJsonError; import com.google.api.client.googleapis.json.GoogleJsonResponseException; +import com.google.api.client.http.ByteArrayContent; +import com.google.api.client.http.GenericUrl; +import com.google.api.client.http.HttpRequest; +import com.google.api.client.http.HttpRequestFactory; import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpResponseException; import com.google.api.client.http.HttpTransport; +import com.google.api.client.http.json.JsonHttpContent; +import com.google.api.client.json.JsonFactory; import com.google.api.client.json.jackson.JacksonFactory; import com.google.api.services.bigquery.Bigquery; import com.google.api.services.bigquery.model.Dataset; @@ -33,6 +43,8 @@ import com.google.api.services.bigquery.model.DatasetReference; import com.google.api.services.bigquery.model.GetQueryResultsResponse; import com.google.api.services.bigquery.model.Job; +import com.google.api.services.bigquery.model.JobConfiguration; +import com.google.api.services.bigquery.model.JobConfigurationLoad; import com.google.api.services.bigquery.model.JobList; import com.google.api.services.bigquery.model.JobStatus; import com.google.api.services.bigquery.model.QueryRequest; @@ -64,6 +76,10 @@ public class DefaultBigQueryRpc implements BigQueryRpc { public 
static final String DEFAULT_PROJECTION = "full"; // see: https://cloud.google.com/bigquery/troubleshooting-errors private static final Set<Integer> RETRYABLE_CODES = ImmutableSet.of(500, 502, 503, 504); + private static final String BASE_RESUMABLE_URI = + "https://www.googleapis.com/upload/bigquery/v2/projects/"; + // see: https://cloud.google.com/bigquery/loading-data-post-request#resume-upload + private static final int HTTP_RESUME_INCOMPLETE = 308; private final BigQueryOptions options; private final Bigquery bigquery; @@ -417,4 +433,69 @@ public QueryResponse query(QueryRequest request) throws BigQueryException { throw translate(ex); } } + + @Override + public String open(JobConfigurationLoad configuration) throws BigQueryException { + try { + Job loadJob = new Job().setConfiguration(new JobConfiguration().setLoad(configuration)); + StringBuilder builder = new StringBuilder() + .append(BASE_RESUMABLE_URI) + .append(options.projectId()) + .append("/jobs"); + GenericUrl url = new GenericUrl(builder.toString()); + url.set("uploadType", "resumable"); + JsonFactory jsonFactory = bigquery.getJsonFactory(); + HttpRequestFactory requestFactory = bigquery.getRequestFactory(); + HttpRequest httpRequest = + requestFactory.buildPostRequest(url, new JsonHttpContent(jsonFactory, loadJob)); + httpRequest.getHeaders().set("X-Upload-Content-Type", "application/octet-stream"); + HttpResponse response = httpRequest.execute(); + return response.getHeaders().getLocation(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public void write(String uploadId, byte[] toWrite, int toWriteOffset, long destOffset, int length, + boolean last) throws BigQueryException { + try { + GenericUrl url = new GenericUrl(uploadId); + HttpRequest httpRequest = bigquery.getRequestFactory().buildPutRequest(url, + new ByteArrayContent(null, toWrite, toWriteOffset, length)); + long limit = destOffset + length; + StringBuilder range = new StringBuilder("bytes "); + range.append(destOffset).append('-').append(limit - 1).append('/'); + if (last) { + range.append(limit); + } else { + range.append('*'); + } + httpRequest.getHeaders().setContentRange(range.toString()); + int code; + String message; + IOException exception = null; + try { + HttpResponse response = httpRequest.execute(); + code = response.getStatusCode(); + message = response.getStatusMessage(); + } catch (HttpResponseException ex) { + exception = ex; + code = ex.getStatusCode(); + message = ex.getStatusMessage(); + } + if (!last && code != HTTP_RESUME_INCOMPLETE + || last && !(code == HTTP_OK || code == HTTP_CREATED)) { + if (exception != null) { + throw exception; + } + GoogleJsonError error = new GoogleJsonError(); + error.setCode(code); + error.setMessage(message); + throw translate(error); + } + } catch (IOException ex) { + throw translate(ex); + } + } } diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/BigQueryImplTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/BigQueryImplTest.java index ed54e6a94111..e33e08904df8 100644 --- a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/BigQueryImplTest.java +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/BigQueryImplTest.java @@ -41,10 +41,12 @@ import com.google.common.collect.Lists; import com.google.gcloud.Page; import com.google.gcloud.RetryParams; +import com.google.gcloud.WriteChannel; import com.google.gcloud.bigquery.InsertAllRequest.RowToInsert; import com.google.gcloud.spi.BigQueryRpc; import
com.google.gcloud.spi.BigQueryRpc.Tuple; import com.google.gcloud.spi.BigQueryRpcFactory; +import com.google.gcloud.storage.BlobInfo; import org.easymock.Capture; import org.easymock.EasyMock; @@ -107,11 +109,11 @@ public class BigQueryImplTest { private static final TableInfo OTHER_TABLE_INFO = TableInfo.of(OTHER_TABLE_ID, TABLE_SCHEMA); private static final TableInfo TABLE_INFO_WITH_PROJECT = TableInfo.of(TABLE_ID_WITH_PROJECT, TABLE_SCHEMA); - private static final LoadJobInfo LOAD_JOB = LoadJobInfo.of(TABLE_ID, "URI"); + private static final LoadJobInfo LOAD_JOB = LoadJobInfo.of(LoadConfiguration.of(TABLE_ID), "URI"); private static final LoadJobInfo LOAD_JOB_WITH_PROJECT = - LoadJobInfo.of(TABLE_ID_WITH_PROJECT, "URI"); + LoadJobInfo.of(LoadConfiguration.of(TABLE_ID_WITH_PROJECT), "URI"); private static final LoadJobInfo COMPLETE_LOAD_JOB = - LoadJobInfo.builder(TABLE_ID_WITH_PROJECT, "URI") + LoadJobInfo.builder(LoadConfiguration.of(TABLE_ID_WITH_PROJECT), "URI") .jobId(JobId.of(PROJECT, JOB)) .build(); private static final CopyJobInfo COPY_JOB = @@ -1006,6 +1008,18 @@ public void testGetQueryResultsWithOptions() { assertEquals("cursor", response.result().nextPageCursor()); } + @Test + public void testWriter() { + LoadConfiguration loadConfiguration = LoadConfiguration.of(TABLE_ID); + EasyMock.expect(bigqueryRpcMock.open(LoadConfiguration.of(TABLE_ID_WITH_PROJECT).toPb())) + .andReturn("upload-id"); + EasyMock.replay(bigqueryRpcMock); + bigquery = options.service(); + WriteChannel channel = bigquery.writer(loadConfiguration); + assertNotNull(channel); + assertTrue(channel.isOpen()); + } + @Test public void testRetryableException() { EasyMock.expect(bigqueryRpcMock.getDataset(DATASET, EMPTY_RPC_OPTIONS)) diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/ITBigQueryTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/ITBigQueryTest.java index fa527df5aa75..671a610ea14c 100644 --- a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/ITBigQueryTest.java +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/ITBigQueryTest.java @@ -16,6 +16,7 @@ package com.google.gcloud.bigquery; +import static com.google.gcloud.bigquery.BigQuery.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -27,9 +28,6 @@ import com.google.common.collect.ImmutableMap; import com.google.gcloud.Page; import com.google.gcloud.bigquery.BigQuery.DatasetOption; -import com.google.gcloud.bigquery.BigQuery.JobListOption; -import com.google.gcloud.bigquery.BigQuery.JobOption; -import com.google.gcloud.bigquery.BigQuery.TableOption; import com.google.gcloud.bigquery.testing.RemoteBigQueryHelper; import com.google.gcloud.storage.BlobInfo; import com.google.gcloud.storage.BucketInfo; @@ -42,7 +40,9 @@ import org.junit.Test; import org.junit.rules.Timeout; +import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.List; @@ -147,11 +147,11 @@ public static void beforeClass() throws IOException, InterruptedException { JSON_CONTENT.getBytes(StandardCharsets.UTF_8)); DatasetInfo info = DatasetInfo.builder(DATASET).description(DESCRIPTION).build(); bigquery.create(info); - LoadJobInfo job = LoadJobInfo.builder(TABLE_ID, "gs://" + BUCKET + "/" + JSON_LOAD_FILE) + LoadConfiguration configuration = LoadConfiguration.builder(TABLE_ID, FormatOptions.json()) 
.createDisposition(JobInfo.CreateDisposition.CREATE_IF_NEEDED) .schema(TABLE_SCHEMA) - .formatOptions(FormatOptions.json()) .build(); + LoadJobInfo job = LoadJobInfo.of(configuration, "gs://" + BUCKET + "/" + JSON_LOAD_FILE); job = bigquery.create(job); while (job.status().state() != JobStatus.State.DONE) { Thread.sleep(1000); @@ -188,7 +188,7 @@ public void testGetDataset() { @Test public void testGetDatasetWithSelectedFields() { DatasetInfo dataset = bigquery.getDataset(DATASET, - DatasetOption.fields(BigQuery.DatasetField.CREATION_TIME)); + DatasetOption.fields(DatasetField.CREATION_TIME)); assertEquals(bigquery.options().projectId(), dataset.datasetId().project()); assertEquals(DATASET, dataset.datasetId().dataset()); assertNotNull(dataset.creationTime()); @@ -229,7 +229,7 @@ public void testUpdateDatasetWithSelectedFields() { assertEquals("Some Description", dataset.description()); DatasetInfo updatedDataset = bigquery.update(dataset.toBuilder().description("Updated Description").build(), - DatasetOption.fields(BigQuery.DatasetField.DESCRIPTION)); + DatasetOption.fields(DatasetField.DESCRIPTION)); assertEquals("Updated Description", updatedDataset.description()); assertNull(updatedDataset.creationTime()); assertNull(updatedDataset.defaultTableLifetime()); @@ -278,7 +278,7 @@ public void testCreateAndGetTableWithSelectedField() { assertEquals(DATASET, createdTableInfo.tableId().dataset()); assertEquals(tableName, createdTableInfo.tableId().table()); BaseTableInfo remoteTableInfo = bigquery.getTable(DATASET, tableName, - TableOption.fields(BigQuery.TableField.CREATION_TIME)); + TableOption.fields(TableField.CREATION_TIME)); assertNotNull(remoteTableInfo); assertTrue(remoteTableInfo instanceof TableInfo); assertEquals(createdTableInfo.tableId(), remoteTableInfo.tableId()); @@ -438,7 +438,7 @@ public void testUpdateTableWithSelectedFields() { BaseTableInfo createdTableInfo = bigquery.create(tableInfo); assertNotNull(createdTableInfo); BaseTableInfo updatedTableInfo = bigquery.update(tableInfo.toBuilder().description("newDescr") - .build(), TableOption.fields(BigQuery.TableField.DESCRIPTION)); + .build(), TableOption.fields(TableField.DESCRIPTION)); assertTrue(updatedTableInfo instanceof TableInfo); assertEquals(DATASET, updatedTableInfo.tableId().dataset()); assertEquals(tableName, updatedTableInfo.tableId().table()); @@ -659,7 +659,7 @@ public void testListJobs() { @Test public void testListJobsWithSelectedFields() { - Page jobs = bigquery.listJobs(JobListOption.fields(BigQuery.JobField.USER_EMAIL)); + Page jobs = bigquery.listJobs(JobListOption.fields(JobField.USER_EMAIL)); for (JobInfo job : jobs.values()) { assertNotNull(job.jobId()); assertNotNull(job.status()); @@ -709,7 +709,7 @@ public void testCreateAndGetJobWithSelectedFields() throws InterruptedException assertEquals(sourceTableName, createdTableInfo.tableId().table()); TableId destinationTable = TableId.of(DATASET, destinationTableName); CopyJobInfo job = CopyJobInfo.of(destinationTable, sourceTable); - CopyJobInfo createdJob = bigquery.create(job, JobOption.fields(BigQuery.JobField.ETAG)); + CopyJobInfo createdJob = bigquery.create(job, JobOption.fields(JobField.ETAG)); assertNotNull(createdJob.jobId()); assertNotNull(createdJob.sourceTables()); assertNotNull(createdJob.destinationTable()); @@ -719,7 +719,7 @@ public void testCreateAndGetJobWithSelectedFields() throws InterruptedException assertNull(createdJob.selfLink()); assertNull(createdJob.userEmail()); CopyJobInfo remoteJob = bigquery.getJob(createdJob.jobId(), - 
JobOption.fields(BigQuery.JobField.ETAG)); + JobOption.fields(JobField.ETAG)); assertEquals(createdJob.jobId(), remoteJob.jobId()); assertEquals(createdJob.sourceTables(), remoteJob.sourceTables()); assertEquals(createdJob.destinationTable(), remoteJob.destinationTable()); @@ -810,10 +810,11 @@ public void testQueryJob() throws InterruptedException { public void testExtractJob() throws InterruptedException { String tableName = "test_export_job_table"; TableId destinationTable = TableId.of(DATASET, tableName); - LoadJobInfo remoteLoadJob = bigquery.create( - LoadJobInfo.builder(destinationTable, "gs://" + BUCKET + "/" + LOAD_FILE) - .schema(SIMPLE_SCHEMA) - .build()); + LoadConfiguration configuration = LoadConfiguration.builder(destinationTable) + .schema(SIMPLE_SCHEMA) + .build(); + LoadJobInfo remoteLoadJob = + bigquery.create(LoadJobInfo.of(configuration, "gs://" + BUCKET + "/" + LOAD_FILE)); while (remoteLoadJob.status().state() != JobStatus.State.DONE) { Thread.sleep(1000); remoteLoadJob = bigquery.getJob(remoteLoadJob.jobId()); @@ -857,4 +858,51 @@ public void testCancelJob() throws InterruptedException { public void testCancelNonExistingJob() throws InterruptedException { assertFalse(bigquery.cancel("test_cancel_non_existing_job")); } + + @Test + public void testInsertFromFile() throws InterruptedException, FileNotFoundException { + String destinationTableName = "test_insert_from_file_table"; + TableId tableId = TableId.of(DATASET, destinationTableName); + LoadConfiguration configuration = LoadConfiguration.builder(tableId) + .formatOptions(FormatOptions.json()) + .createDisposition(JobInfo.CreateDisposition.CREATE_IF_NEEDED) + .schema(TABLE_SCHEMA) + .build(); + try (TableDataWriteChannel channel = bigquery.writer(configuration)) { + channel.write(ByteBuffer.wrap(JSON_CONTENT.getBytes(StandardCharsets.UTF_8))); + } catch (IOException e) { + fail("IOException was not expected"); + } + // wait until the new table is created. 
If the table is never created the test will time out + while (bigquery.getTable(tableId) == null) { + Thread.sleep(1000L); + } + Page<List<FieldValue>> rows = bigquery.listTableData(tableId); + int rowCount = 0; + for (List<FieldValue> row : rows.values()) { + FieldValue timestampCell = row.get(0); + FieldValue stringCell = row.get(1); + FieldValue integerCell = row.get(2); + FieldValue booleanCell = row.get(3); + FieldValue recordCell = row.get(4); + assertEquals(FieldValue.Attribute.PRIMITIVE, timestampCell.attribute()); + assertEquals(FieldValue.Attribute.PRIMITIVE, stringCell.attribute()); + assertEquals(FieldValue.Attribute.REPEATED, integerCell.attribute()); + assertEquals(FieldValue.Attribute.PRIMITIVE, booleanCell.attribute()); + assertEquals(FieldValue.Attribute.RECORD, recordCell.attribute()); + assertEquals(1408452095220000L, timestampCell.timestampValue()); + assertEquals("stringValue", stringCell.stringValue()); + assertEquals(0, integerCell.repeatedValue().get(0).longValue()); + assertEquals(1, integerCell.repeatedValue().get(1).longValue()); + assertEquals(false, booleanCell.booleanValue()); + assertEquals(-14182916000000L, recordCell.recordValue().get(0).timestampValue()); + assertTrue(recordCell.recordValue().get(1).isNull()); + assertEquals(1, recordCell.recordValue().get(2).repeatedValue().get(0).longValue()); + assertEquals(0, recordCell.recordValue().get(2).repeatedValue().get(1).longValue()); + assertEquals(true, recordCell.recordValue().get(3).booleanValue()); + rowCount++; + } + assertEquals(2, rowCount); + assertTrue(bigquery.delete(DATASET, destinationTableName)); + } } diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadConfigurationTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadConfigurationTest.java new file mode 100644 index 000000000000..57616210501f --- /dev/null +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadConfigurationTest.java @@ -0,0 +1,123 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
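// A standalone sketch of the writer API exercised by testInsertFromFile above.
// The project, dataset, table and payload here are illustrative assumptions, not
// part of this PR; the options.service() bootstrap follows the usual gcloud-java pattern.
import com.google.gcloud.bigquery.BigQuery;
import com.google.gcloud.bigquery.BigQueryOptions;
import com.google.gcloud.bigquery.FormatOptions;
import com.google.gcloud.bigquery.LoadConfiguration;
import com.google.gcloud.bigquery.TableDataWriteChannel;
import com.google.gcloud.bigquery.TableId;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class TableDataWriterSketch {
  public static void main(String... args) throws IOException {
    BigQuery bigquery = BigQueryOptions.builder().projectId("my-project").build().service();
    LoadConfiguration configuration = LoadConfiguration.builder(TableId.of("myDataset", "myTable"))
        .formatOptions(FormatOptions.json())
        .build();
    // Writes are buffered locally and uploaded in chunks; the load job that
    // makes the data visible in the table starts only when the channel closes.
    try (TableDataWriteChannel channel = bigquery.writer(configuration)) {
      channel.write(ByteBuffer.wrap("{\"name\":\"value\"}\n".getBytes(StandardCharsets.UTF_8)));
    }
  }
}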
+ */ + +package com.google.gcloud.bigquery; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; +import com.google.gcloud.bigquery.JobInfo.CreateDisposition; +import com.google.gcloud.bigquery.JobInfo.WriteDisposition; + +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +public class LoadConfigurationTest { + + private static final CsvOptions CSV_OPTIONS = CsvOptions.builder() + .allowJaggedRows(true) + .allowQuotedNewLines(false) + .encoding(StandardCharsets.UTF_8) + .build(); + private static final TableId TABLE_ID = TableId.of("dataset", "table"); + private static final CreateDisposition CREATE_DISPOSITION = CreateDisposition.CREATE_IF_NEEDED; + private static final WriteDisposition WRITE_DISPOSITION = WriteDisposition.WRITE_APPEND; + private static final Integer MAX_BAD_RECORDS = 42; + private static final String FORMAT = "CSV"; + private static final Boolean IGNORE_UNKNOWN_VALUES = true; + private static final List PROJECTION_FIELDS = ImmutableList.of("field1", "field2"); + private static final Field FIELD_SCHEMA = Field.builder("IntegerField", Field.Type.integer()) + .mode(Field.Mode.REQUIRED) + .description("FieldDescription") + .build(); + private static final Schema TABLE_SCHEMA = Schema.of(FIELD_SCHEMA); + private static final LoadConfiguration LOAD_CONFIGURATION = LoadConfiguration.builder(TABLE_ID) + .createDisposition(CREATE_DISPOSITION) + .writeDisposition(WRITE_DISPOSITION) + .formatOptions(CSV_OPTIONS) + .ignoreUnknownValues(IGNORE_UNKNOWN_VALUES) + .maxBadRecords(MAX_BAD_RECORDS) + .projectionFields(PROJECTION_FIELDS) + .schema(TABLE_SCHEMA) + .build(); + + @Test + public void testToBuilder() { + compareLoadConfiguration(LOAD_CONFIGURATION, LOAD_CONFIGURATION.toBuilder().build()); + LoadConfiguration configuration = LOAD_CONFIGURATION.toBuilder() + .destinationTable(TableId.of("dataset", "newTable")) + .build(); + assertEquals("newTable", configuration.destinationTable().table()); + configuration = configuration.toBuilder().destinationTable(TABLE_ID).build(); + compareLoadConfiguration(LOAD_CONFIGURATION, configuration); + } + + @Test + public void testOf() { + LoadConfiguration configuration = LoadConfiguration.of(TABLE_ID); + assertEquals(TABLE_ID, configuration.destinationTable()); + configuration = LoadConfiguration.of(TABLE_ID, CSV_OPTIONS); + assertEquals(TABLE_ID, configuration.destinationTable()); + assertEquals(FORMAT, configuration.format()); + assertEquals(CSV_OPTIONS, configuration.csvOptions()); + } + + @Test + public void testToBuilderIncomplete() { + LoadConfiguration configuration = LoadConfiguration.of(TABLE_ID); + compareLoadConfiguration( configuration, configuration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(TABLE_ID, LOAD_CONFIGURATION.destinationTable()); + assertEquals(CREATE_DISPOSITION, LOAD_CONFIGURATION.createDisposition()); + assertEquals(WRITE_DISPOSITION, LOAD_CONFIGURATION.writeDisposition()); + assertEquals(CSV_OPTIONS, LOAD_CONFIGURATION.csvOptions()); + assertEquals(FORMAT, LOAD_CONFIGURATION.format()); + assertEquals(IGNORE_UNKNOWN_VALUES, LOAD_CONFIGURATION.ignoreUnknownValues()); + assertEquals(MAX_BAD_RECORDS, LOAD_CONFIGURATION.maxBadRecords()); + assertEquals(PROJECTION_FIELDS, LOAD_CONFIGURATION.projectionFields()); + assertEquals(TABLE_SCHEMA, LOAD_CONFIGURATION.schema()); + } + + @Test + public void testToPbAndFromPb() { + 
assertNull(LOAD_CONFIGURATION.toPb().getSourceUris()); + compareLoadConfiguration(LOAD_CONFIGURATION, + LoadConfiguration.fromPb(LOAD_CONFIGURATION.toPb())); + LoadConfiguration configuration = LoadConfiguration.of(TABLE_ID); + compareLoadConfiguration(configuration, LoadConfiguration.fromPb(configuration.toPb())); + } + + private void compareLoadConfiguration(LoadConfiguration expected, LoadConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.hashCode(), value.hashCode()); + assertEquals(expected.toString(), value.toString()); + assertEquals(expected.destinationTable(), value.destinationTable()); + assertEquals(expected.createDisposition(), value.createDisposition()); + assertEquals(expected.writeDisposition(), value.writeDisposition()); + assertEquals(expected.csvOptions(), value.csvOptions()); + assertEquals(expected.format(), value.format()); + assertEquals(expected.ignoreUnknownValues(), value.ignoreUnknownValues()); + assertEquals(expected.maxBadRecords(), value.maxBadRecords()); + assertEquals(expected.projectionFields(), value.projectionFields()); + assertEquals(expected.schema(), value.schema()); + } +} diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadJobInfoTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadJobInfoTest.java index 06ce0b42ad4b..499d0d939698 100644 --- a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadJobInfoTest.java +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/LoadJobInfoTest.java @@ -47,7 +47,6 @@ public class LoadJobInfoTest { private static final CreateDisposition CREATE_DISPOSITION = CreateDisposition.CREATE_IF_NEEDED; private static final WriteDisposition WRITE_DISPOSITION = WriteDisposition.WRITE_APPEND; private static final Integer MAX_BAD_RECORDS = 42; - private static final String FORMAT = "CSV"; private static final Boolean IGNORE_UNKNOWN_VALUES = true; private static final List PROJECTION_FIELDS = ImmutableList.of("field1", "field2"); private static final JobId JOB_ID = JobId.of("job"); @@ -66,13 +65,7 @@ public class LoadJobInfoTest { .inputBytes(2048L) .outputRows(24L) .build(); - private static final LoadJobInfo LOAD_JOB = LoadJobInfo.builder(TABLE_ID, SOURCE_URIS) - .etag(ETAG) - .id(ID) - .selfLink(SELF_LINK) - .userEmail(EMAIL) - .jobId(JOB_ID) - .status(JOB_STATUS) + private static final LoadConfiguration LOAD_CONFIGURATION = LoadConfiguration.builder(TABLE_ID) .createDisposition(CREATE_DISPOSITION) .writeDisposition(WRITE_DISPOSITION) .formatOptions(CSV_OPTIONS) @@ -80,63 +73,47 @@ public class LoadJobInfoTest { .maxBadRecords(MAX_BAD_RECORDS) .projectionFields(PROJECTION_FIELDS) .schema(TABLE_SCHEMA) + .build(); + private static final LoadJobInfo LOAD_JOB = LoadJobInfo.builder(LOAD_CONFIGURATION, SOURCE_URIS) + .etag(ETAG) + .id(ID) + .selfLink(SELF_LINK) + .userEmail(EMAIL) + .jobId(JOB_ID) + .status(JOB_STATUS) .statistics(JOB_STATISTICS) .build(); @Test public void testToBuilder() { compareLoadJobInfo(LOAD_JOB, LOAD_JOB.toBuilder().build()); - LoadJobInfo job = LOAD_JOB.toBuilder() - .destinationTable(TableId.of("dataset", "newTable")) - .build(); - assertEquals("newTable", job.destinationTable().table()); - job = job.toBuilder().destinationTable(TABLE_ID).build(); + LoadJobInfo job = LOAD_JOB.toBuilder().etag("newEtag").build(); + assertEquals("newEtag", job.etag()); + job = job.toBuilder().etag(ETAG).build(); compareLoadJobInfo(LOAD_JOB, job); } @Test public void testOf() { - LoadJobInfo job = 
LoadJobInfo.of(TABLE_ID, SOURCE_URIS); - assertEquals(TABLE_ID, job.destinationTable()); - assertEquals(SOURCE_URIS, job.sourceUris()); - job = LoadJobInfo.of(TABLE_ID, SOURCE_URI); - assertEquals(TABLE_ID, job.destinationTable()); - assertEquals(ImmutableList.of(SOURCE_URI), job.sourceUris()); - job = LoadJobInfo.of(TABLE_ID, CSV_OPTIONS, SOURCE_URIS); - assertEquals(TABLE_ID, job.destinationTable()); + LoadJobInfo job = LoadJobInfo.of(LOAD_CONFIGURATION, SOURCE_URIS); + assertEquals(LOAD_CONFIGURATION, job.configuration()); assertEquals(SOURCE_URIS, job.sourceUris()); - assertEquals(FORMAT, job.format()); - assertEquals(CSV_OPTIONS, job.csvOptions()); - job = LoadJobInfo.of(TABLE_ID, CSV_OPTIONS, SOURCE_URI); - assertEquals(TABLE_ID, job.destinationTable()); - assertEquals(ImmutableList.of(SOURCE_URI), job.sourceUris()); - assertEquals(FORMAT, job.format()); - assertEquals(CSV_OPTIONS, job.csvOptions()); - job = LoadJobInfo.of(JOB_ID, TABLE_ID, SOURCE_URIS); - assertEquals(JOB_ID, job.jobId()); - assertEquals(TABLE_ID, job.destinationTable()); - assertEquals(SOURCE_URIS, job.sourceUris()); - job = LoadJobInfo.of(JOB_ID, TABLE_ID, SOURCE_URI); - assertEquals(JOB_ID, job.jobId()); - assertEquals(TABLE_ID, job.destinationTable()); + job = LoadJobInfo.of(LOAD_CONFIGURATION, SOURCE_URI); + assertEquals(LOAD_CONFIGURATION, job.configuration()); assertEquals(ImmutableList.of(SOURCE_URI), job.sourceUris()); - job = LoadJobInfo.of(JOB_ID, TABLE_ID, CSV_OPTIONS, SOURCE_URIS); + job = LoadJobInfo.of(JOB_ID, LOAD_CONFIGURATION, SOURCE_URIS); assertEquals(JOB_ID, job.jobId()); - assertEquals(TABLE_ID, job.destinationTable()); + assertEquals(LOAD_CONFIGURATION, job.configuration()); assertEquals(SOURCE_URIS, job.sourceUris()); - assertEquals(FORMAT, job.format()); - assertEquals(CSV_OPTIONS, job.csvOptions()); - job = LoadJobInfo.of(JOB_ID, TABLE_ID, CSV_OPTIONS, SOURCE_URI); + job = LoadJobInfo.of(JOB_ID, LOAD_CONFIGURATION, SOURCE_URI); assertEquals(JOB_ID, job.jobId()); - assertEquals(TABLE_ID, job.destinationTable()); + assertEquals(LOAD_CONFIGURATION, job.configuration()); assertEquals(ImmutableList.of(SOURCE_URI), job.sourceUris()); - assertEquals(FORMAT, job.format()); - assertEquals(CSV_OPTIONS, job.csvOptions()); } @Test public void testToBuilderIncomplete() { - LoadJobInfo job = LoadJobInfo.of(TABLE_ID, SOURCE_URIS); + LoadJobInfo job = LoadJobInfo.of(LOAD_CONFIGURATION, SOURCE_URIS); compareLoadJobInfo(job, job.toBuilder().build()); } @@ -148,16 +125,8 @@ public void testBuilder() { assertEquals(EMAIL, LOAD_JOB.userEmail()); assertEquals(JOB_ID, LOAD_JOB.jobId()); assertEquals(JOB_STATUS, LOAD_JOB.status()); - assertEquals(TABLE_ID, LOAD_JOB.destinationTable()); + assertEquals(LOAD_CONFIGURATION, LOAD_JOB.configuration()); assertEquals(SOURCE_URIS, LOAD_JOB.sourceUris()); - assertEquals(CREATE_DISPOSITION, LOAD_JOB.createDisposition()); - assertEquals(WRITE_DISPOSITION, LOAD_JOB.writeDisposition()); - assertEquals(CSV_OPTIONS, LOAD_JOB.csvOptions()); - assertEquals(FORMAT, LOAD_JOB.format()); - assertEquals(IGNORE_UNKNOWN_VALUES, LOAD_JOB.ignoreUnknownValues()); - assertEquals(MAX_BAD_RECORDS, LOAD_JOB.maxBadRecords()); - assertEquals(PROJECTION_FIELDS, LOAD_JOB.projectionFields()); - assertEquals(TABLE_SCHEMA, LOAD_JOB.schema()); assertEquals(JOB_STATISTICS, LOAD_JOB.statistics()); } @@ -170,7 +139,7 @@ public void testToPbAndFromPb() { assertEquals(JOB_STATISTICS, JobStatistics.fromPb(LOAD_JOB.toPb().getStatistics())); compareLoadJobInfo(LOAD_JOB, LoadJobInfo.fromPb(LOAD_JOB.toPb())); 
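// Illustration, using this test's constants plus a made-up URI, of the new
// split these assertions cover: reusable options live on LoadConfiguration,
// while the source URIs stay on LoadJobInfo itself.
LoadJobInfo sketchJob = LoadJobInfo.of(LOAD_CONFIGURATION, "gs://my-bucket/data.csv");
assertEquals(LOAD_CONFIGURATION, sketchJob.configuration());
assertEquals("CSV", sketchJob.configuration().format());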
compareLoadJobInfo(LOAD_JOB, (LoadJobInfo) JobInfo.fromPb(LOAD_JOB.toPb())); - LoadJobInfo job = LoadJobInfo.of(TABLE_ID, SOURCE_URIS); + LoadJobInfo job = LoadJobInfo.of(LOAD_CONFIGURATION, SOURCE_URIS); compareLoadJobInfo(job, LoadJobInfo.fromPb(job.toPb())); compareLoadJobInfo(job, (LoadJobInfo) JobInfo.fromPb(job.toPb())); } @@ -186,15 +155,7 @@ private void compareLoadJobInfo(LoadJobInfo expected, LoadJobInfo value) { assertEquals(expected.status(), value.status()); assertEquals(expected.statistics(), value.statistics()); assertEquals(expected.userEmail(), value.userEmail()); - assertEquals(expected.destinationTable(), value.destinationTable()); + assertEquals(expected.configuration(), value.configuration()); assertEquals(expected.sourceUris(), value.sourceUris()); - assertEquals(expected.createDisposition(), value.createDisposition()); - assertEquals(expected.writeDisposition(), value.writeDisposition()); - assertEquals(expected.csvOptions(), value.csvOptions()); - assertEquals(expected.format(), value.format()); - assertEquals(expected.ignoreUnknownValues(), value.ignoreUnknownValues()); - assertEquals(expected.maxBadRecords(), value.maxBadRecords()); - assertEquals(expected.projectionFields(), value.projectionFields()); - assertEquals(expected.schema(), value.schema()); } } diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/SerializationTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/SerializationTest.java index 8c80bddbfefb..d407ac1630e3 100644 --- a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/SerializationTest.java +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/SerializationTest.java @@ -22,7 +22,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.gcloud.AuthCredentials; +import com.google.gcloud.RestorableState; import com.google.gcloud.RetryParams; +import com.google.gcloud.WriteChannel; import com.google.gcloud.bigquery.TableInfo.StreamingBuffer; import org.junit.Test; @@ -99,9 +101,9 @@ public class SerializationTest { private static final List SOURCE_URIS = ImmutableList.of("uri1", "uri2"); private static final ExternalDataConfiguration EXTERNAL_DATA_CONFIGURATION = ExternalDataConfiguration.builder(SOURCE_URIS, TABLE_SCHEMA, CSV_OPTIONS) - .ignoreUnknownValues(true) - .maxBadRecords(42) - .build(); + .ignoreUnknownValues(true) + .maxBadRecords(42) + .build(); private static final UserDefinedFunction INLINE_FUNCTION = new UserDefinedFunction.InlineFunction("inline"); private static final UserDefinedFunction URI_FUNCTION = @@ -130,10 +132,10 @@ public class SerializationTest { .id(ID) .build(); private static final JobStatistics JOB_STATISTICS = JobStatistics.builder() - .creationTime(1L) - .endTime(3L) - .startTime(2L) - .build(); + .creationTime(1L) + .endTime(3L) + .startTime(2L) + .build(); private static final JobStatistics.ExtractStatistics EXTRACT_STATISTICS = JobStatistics.ExtractStatistics.builder() .creationTime(1L) @@ -168,7 +170,15 @@ public class SerializationTest { private static final JobId JOB_ID = JobId.of("project", "job"); private static final CopyJobInfo COPY_JOB = CopyJobInfo.of(TABLE_ID, TABLE_ID); private static final ExtractJobInfo EXTRACT_JOB = ExtractJobInfo.of(TABLE_ID, SOURCE_URIS); - private static final LoadJobInfo LOAD_JOB = LoadJobInfo.of(TABLE_ID, SOURCE_URIS); + private static final LoadConfiguration LOAD_CONFIGURATION = LoadConfiguration.builder(TABLE_ID) + 
.createDisposition(JobInfo.CreateDisposition.CREATE_IF_NEEDED) + .writeDisposition(JobInfo.WriteDisposition.WRITE_APPEND) + .formatOptions(CSV_OPTIONS) + .ignoreUnknownValues(true) + .maxBadRecords(10) + .schema(TABLE_SCHEMA) + .build(); + private static final LoadJobInfo LOAD_JOB = LoadJobInfo.of(LOAD_CONFIGURATION, SOURCE_URIS); private static final QueryJobInfo QUERY_JOB = QueryJobInfo.of("query"); private static final Map CONTENT1 = ImmutableMap.of("key", "val1"); @@ -231,8 +241,8 @@ public void testModelAndRequests() throws Exception { DATASET_INFO, TABLE_ID, CSV_OPTIONS, STREAMING_BUFFER, EXTERNAL_DATA_CONFIGURATION, TABLE_SCHEMA, TABLE_INFO, VIEW_INFO, EXTERNAL_TABLE_INFO, INLINE_FUNCTION, URI_FUNCTION, JOB_STATISTICS, EXTRACT_STATISTICS, LOAD_STATISTICS, QUERY_STATISTICS, BIGQUERY_ERROR, - JOB_STATUS, JOB_ID, COPY_JOB, EXTRACT_JOB, LOAD_JOB, QUERY_JOB, INSERT_ALL_REQUEST, - INSERT_ALL_RESPONSE, FIELD_VALUE, QUERY_REQUEST, QUERY_RESPONSE, + JOB_STATUS, JOB_ID, COPY_JOB, EXTRACT_JOB, LOAD_CONFIGURATION, LOAD_JOB, QUERY_JOB, + INSERT_ALL_REQUEST, INSERT_ALL_RESPONSE, FIELD_VALUE, QUERY_REQUEST, QUERY_RESPONSE, BigQuery.DatasetOption.fields(), BigQuery.DatasetDeleteOption.deleteContents(), BigQuery.DatasetListOption.all(), BigQuery.TableOption.fields(), BigQuery.TableListOption.maxResults(42L), BigQuery.JobOption.fields(), @@ -246,8 +256,25 @@ public void testModelAndRequests() throws Exception { } } + @Test + public void testWriteChannelState() throws IOException, ClassNotFoundException { + BigQueryOptions options = BigQueryOptions.builder() + .projectId("p2") + .retryParams(RetryParams.defaultInstance()) + .build(); + // avoid closing when you don't want partial writes upon failure + @SuppressWarnings("resource") + TableDataWriteChannel writer = + new TableDataWriteChannel(options, LOAD_CONFIGURATION, "upload-id"); + RestorableState state = writer.capture(); + RestorableState deserializedState = serializeAndDeserialize(state); + assertEquals(state, deserializedState); + assertEquals(state.hashCode(), deserializedState.hashCode()); + assertEquals(state.toString(), deserializedState.toString()); + } + @SuppressWarnings("unchecked") - private T serializeAndDeserialize(T obj) + private T serializeAndDeserialize(T obj) throws IOException, ClassNotFoundException { ByteArrayOutputStream bytes = new ByteArrayOutputStream(); try (ObjectOutputStream output = new ObjectOutputStream(bytes)) { diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableDataWriteChannelTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableDataWriteChannelTest.java new file mode 100644 index 000000000000..67933407e377 --- /dev/null +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableDataWriteChannelTest.java @@ -0,0 +1,248 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
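// The testWriteChannelState test above round-trips a captured writer state; a
// short sketch of how a caller might use capture/restore to resume an upload.
// Obtaining the channel from bigquery.writer(...) is assumed, not shown.
import com.google.gcloud.RestorableState;
import com.google.gcloud.WriteChannel;

import java.io.IOException;
import java.nio.ByteBuffer;

public class ResumeUploadSketch {
  static void resumeAndFinish(RestorableState<WriteChannel> state, ByteBuffer remaining)
      throws IOException {
    // The captured state is Serializable, so it can be persisted and restored
    // later, possibly in another process. Per the WriteChannel contract, the
    // original channel and the restored one should not both be used.
    WriteChannel restored = state.restore();
    restored.write(remaining);
    restored.close(); // flushes the buffer and completes the resumable upload
  }
}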
+ */ + +package com.google.gcloud.bigquery; + +import static org.easymock.EasyMock.anyObject; +import static org.easymock.EasyMock.capture; +import static org.easymock.EasyMock.captureLong; +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.eq; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.expectLastCall; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.gcloud.RestorableState; +import com.google.gcloud.WriteChannel; +import com.google.gcloud.spi.BigQueryRpc; +import com.google.gcloud.spi.BigQueryRpcFactory; + +import org.easymock.Capture; +import org.easymock.CaptureType; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Random; + +public class TableDataWriteChannelTest { + + private static final String UPLOAD_ID = "uploadid"; + private static final TableId TABLE_ID = TableId.of("dataset", "table"); + private static final LoadConfiguration LOAD_CONFIGURATION = LoadConfiguration.builder(TABLE_ID) + .createDisposition(JobInfo.CreateDisposition.CREATE_IF_NEEDED) + .writeDisposition(JobInfo.WriteDisposition.WRITE_APPEND) + .formatOptions(FormatOptions.json()) + .ignoreUnknownValues(true) + .maxBadRecords(10) + .build(); + private static final int MIN_CHUNK_SIZE = 256 * 1024; + private static final int DEFAULT_CHUNK_SIZE = 8 * MIN_CHUNK_SIZE; + private static final int CUSTOM_CHUNK_SIZE = 4 * MIN_CHUNK_SIZE; + private static final Random RANDOM = new Random(); + + private BigQueryOptions options; + private BigQueryRpcFactory rpcFactoryMock; + private BigQueryRpc bigqueryRpcMock; + private TableDataWriteChannel writer; + + @Before + public void setUp() { + rpcFactoryMock = createMock(BigQueryRpcFactory.class); + bigqueryRpcMock = createMock(BigQueryRpc.class); + expect(rpcFactoryMock.create(anyObject(BigQueryOptions.class))) + .andReturn(bigqueryRpcMock); + replay(rpcFactoryMock); + options = BigQueryOptions.builder() + .projectId("projectid") + .serviceRpcFactory(rpcFactoryMock) + .build(); + } + + @After + public void tearDown() throws Exception { + verify(rpcFactoryMock, bigqueryRpcMock); + } + + @Test + public void testCreate() { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + assertTrue(writer.isOpen()); + } + + @Test + public void testWriteWithoutFlush() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + assertEquals(MIN_CHUNK_SIZE, writer.write(ByteBuffer.allocate(MIN_CHUNK_SIZE))); + } + + @Test + public void testWriteWithFlush() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), + eq(CUSTOM_CHUNK_SIZE), eq(false)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + writer.chunkSize(CUSTOM_CHUNK_SIZE); + ByteBuffer buffer = randomBuffer(CUSTOM_CHUNK_SIZE); + 
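// Note: CUSTOM_CHUNK_SIZE is deliberately a multiple of 256 KB. BaseWriteChannel
// (added later in this PR) normalizes a requested chunk size as
// Math.max(MIN_CHUNK_SIZE, (requested / MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE),
// so chunkSize(42) yields 256 KB and chunkSize(512 * 1025) yields 512 KB
// (see BaseWriteChannelTest.testChunkSize).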
assertEquals(CUSTOM_CHUNK_SIZE, writer.write(buffer)); + assertArrayEquals(buffer.array(), capturedBuffer.getValue()); + } + + @Test + public void testWritesAndFlush() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), + eq(DEFAULT_CHUNK_SIZE), eq(false)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + ByteBuffer[] buffers = new ByteBuffer[DEFAULT_CHUNK_SIZE / MIN_CHUNK_SIZE]; + for (int i = 0; i < buffers.length; i++) { + buffers[i] = randomBuffer(MIN_CHUNK_SIZE); + assertEquals(MIN_CHUNK_SIZE, writer.write(buffers[i])); + } + for (int i = 0; i < buffers.length; i++) { + assertArrayEquals( + buffers[i].array(), + Arrays.copyOfRange( + capturedBuffer.getValue(), MIN_CHUNK_SIZE * i, MIN_CHUNK_SIZE * (i + 1))); + } + } + + @Test + public void testCloseWithoutFlush() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + assertTrue(writer.isOpen()); + writer.close(); + assertArrayEquals(new byte[0], capturedBuffer.getValue()); + assertTrue(!writer.isOpen()); + } + + @Test + public void testCloseWithFlush() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + ByteBuffer buffer = randomBuffer(MIN_CHUNK_SIZE); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(MIN_CHUNK_SIZE), + eq(true)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + assertTrue(writer.isOpen()); + writer.write(buffer); + writer.close(); + assertEquals(DEFAULT_CHUNK_SIZE, capturedBuffer.getValue().length); + assertArrayEquals(buffer.array(), Arrays.copyOf(capturedBuffer.getValue(), MIN_CHUNK_SIZE)); + assertTrue(!writer.isOpen()); + } + + @Test + public void testWriteClosed() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + writer.close(); + try { + writer.write(ByteBuffer.allocate(MIN_CHUNK_SIZE)); + fail("Expected TableDataWriteChannel write to throw IOException"); + } catch (IOException ex) { + // expected + } + } + + @Test + public void testSaveAndRestore() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(CaptureType.ALL); + Capture capturedPosition = Capture.newInstance(CaptureType.ALL); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), + captureLong(capturedPosition), eq(DEFAULT_CHUNK_SIZE), eq(false)); + expectLastCall().times(2); + replay(bigqueryRpcMock); + ByteBuffer buffer1 = randomBuffer(DEFAULT_CHUNK_SIZE); + ByteBuffer buffer2 = randomBuffer(DEFAULT_CHUNK_SIZE); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + assertEquals(DEFAULT_CHUNK_SIZE, writer.write(buffer1)); + assertArrayEquals(buffer1.array(), 
capturedBuffer.getValues().get(0)); + assertEquals(new Long(0L), capturedPosition.getValues().get(0)); + RestorableState writerState = writer.capture(); + WriteChannel restoredWriter = writerState.restore(); + assertEquals(DEFAULT_CHUNK_SIZE, restoredWriter.write(buffer2)); + assertArrayEquals(buffer2.array(), capturedBuffer.getValues().get(1)); + assertEquals(new Long(DEFAULT_CHUNK_SIZE), capturedPosition.getValues().get(1)); + } + + @Test + public void testSaveAndRestoreClosed() throws IOException { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID); + Capture capturedBuffer = Capture.newInstance(); + bigqueryRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + writer.close(); + RestorableState writerState = writer.capture(); + RestorableState expectedWriterState = + TableDataWriteChannel.StateImpl.builder(options, LOAD_CONFIGURATION, UPLOAD_ID) + .buffer(null) + .chunkSize(DEFAULT_CHUNK_SIZE) + .isOpen(false) + .position(0) + .build(); + WriteChannel restoredWriter = writerState.restore(); + assertArrayEquals(new byte[0], capturedBuffer.getValue()); + assertEquals(expectedWriterState, restoredWriter.capture()); + } + + @Test + public void testStateEquals() { + expect(bigqueryRpcMock.open(LOAD_CONFIGURATION.toPb())).andReturn(UPLOAD_ID).times(2); + replay(bigqueryRpcMock); + writer = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + // avoid closing when you don't want partial writes upon failure + @SuppressWarnings("resource") + WriteChannel writer2 = new TableDataWriteChannel(options, LOAD_CONFIGURATION); + RestorableState state = writer.capture(); + RestorableState state2 = writer2.capture(); + assertEquals(state, state2); + assertEquals(state.hashCode(), state2.hashCode()); + assertEquals(state.toString(), state2.toString()); + } + + private static ByteBuffer randomBuffer(int size) { + byte[] byteArray = new byte[size]; + RANDOM.nextBytes(byteArray); + return ByteBuffer.wrap(byteArray); + } +} diff --git a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableTest.java b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableTest.java index dfcf17c90ab3..c931d768def1 100644 --- a/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableTest.java +++ b/gcloud-java-bigquery/src/test/java/com/google/gcloud/bigquery/TableTest.java @@ -48,9 +48,9 @@ public class TableTest { private static final TableId TABLE_ID1 = TableId.of("dataset", "table1"); private static final TableId TABLE_ID2 = TableId.of("dataset", "table2"); private static final JobInfo COPY_JOB_INFO = CopyJobInfo.of(TABLE_ID2, TABLE_ID1); - private static final JobInfo LOAD_JOB_INFO = - LoadJobInfo.builder(TABLE_ID1, ImmutableList.of("URI")) - .formatOptions(FormatOptions.json()) + private static final JobInfo LOAD_JOB_INFO = LoadJobInfo.builder( + LoadConfiguration.builder(TABLE_ID1).formatOptions(FormatOptions.json()).build(), + ImmutableList.of("URI")) .build(); private static final JobInfo EXTRACT_JOB_INFO = ExtractJobInfo.builder(TABLE_ID1, ImmutableList.of("URI")) diff --git a/gcloud-java-core/src/main/java/com/google/gcloud/BaseWriteChannel.java b/gcloud-java-core/src/main/java/com/google/gcloud/BaseWriteChannel.java new file mode 100644 index 000000000000..0a2e4126d180 --- /dev/null +++ b/gcloud-java-core/src/main/java/com/google/gcloud/BaseWriteChannel.java @@ -0,0 +1,218 @@ +/* + * Copyright 2015 Google Inc. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud; + +import com.google.common.base.MoreObjects; + +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Objects; + +/** + * Base implementation for a {@link WriteChannel}. + * + * @param the service options used by the channel to issue RPC requests + * @param the entity this channel writes data to. Possibly with additional configuration + */ +public abstract class BaseWriteChannel< + ServiceOptionsT extends ServiceOptions, + EntityT extends Serializable> implements WriteChannel { + + private static final int MIN_CHUNK_SIZE = 256 * 1024; + private static final int DEFAULT_CHUNK_SIZE = 8 * MIN_CHUNK_SIZE; + + protected final ServiceOptionsT options; + protected final EntityT entity; + protected final String uploadId; + protected int position; + protected byte[] buffer = new byte[0]; + protected int limit; + protected boolean isOpen = true; + protected int chunkSize = DEFAULT_CHUNK_SIZE; + + protected abstract void write(int length, boolean last); + + protected BaseWriteChannel(ServiceOptionsT options, EntityT entity, String uploadId) { + this.options = options; + this.entity = entity; + this.uploadId = uploadId; + } + + protected void flush() { + if (limit >= chunkSize) { + final int length = limit - limit % MIN_CHUNK_SIZE; + write(length, false); + position += length; + limit -= length; + byte[] temp = new byte[chunkSize]; + System.arraycopy(buffer, length, temp, 0, limit); + buffer = temp; + } + } + + protected void validateOpen() throws IOException { + if (!isOpen) { + throw new IOException("stream is closed"); + } + } + + @Override + public int write(ByteBuffer byteBuffer) throws IOException { + validateOpen(); + int toWrite = byteBuffer.remaining(); + int spaceInBuffer = buffer.length - limit; + if (spaceInBuffer >= toWrite) { + byteBuffer.get(buffer, limit, toWrite); + } else { + buffer = Arrays.copyOf(buffer, Math.max(chunkSize, buffer.length + toWrite - spaceInBuffer)); + byteBuffer.get(buffer, limit, toWrite); + } + limit += toWrite; + flush(); + return toWrite; + } + + @Override + public boolean isOpen() { + return isOpen; + } + + @Override + public void close() throws IOException { + if (isOpen) { + write(limit, true); + position += buffer.length; + isOpen = false; + buffer = null; + } + } + + @Override + public void chunkSize(int chunkSize) { + chunkSize = (chunkSize / MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE; + this.chunkSize = Math.max(MIN_CHUNK_SIZE, chunkSize); + } + + protected abstract static class BaseState< + ServiceOptionsT extends ServiceOptions, EntityT extends Serializable> + implements RestorableState, Serializable { + + private static final long serialVersionUID = 8541062465055125619L; + + protected final ServiceOptionsT serviceOptions; + protected final EntityT entity; + protected final String uploadId; + protected final int position; + protected final byte[] buffer; + protected final 
boolean isOpen; + protected final int chunkSize; + + protected BaseState(Builder builder) { + this.serviceOptions = builder.serviceOptions; + this.entity = builder.entity; + this.uploadId = builder.uploadId; + this.position = builder.position; + this.buffer = builder.buffer; + this.isOpen = builder.isOpen; + this.chunkSize = builder.chunkSize; + } + + /** + * Base builder for a write channel's state. Users are not supposed to access this class + * directly. + * + * @param the service options used by the channel to issue RPC requests + * @param the entity this channel writes data to. Possibly with additional + * configuration + */ + public abstract static class Builder< + ServiceOptionsT extends ServiceOptions, + EntityT extends Serializable> { + private final ServiceOptionsT serviceOptions; + private final EntityT entity; + private final String uploadId; + private int position; + private byte[] buffer; + private boolean isOpen; + private int chunkSize; + + protected Builder(ServiceOptionsT options, EntityT entity, String uploadId) { + this.serviceOptions = options; + this.entity = entity; + this.uploadId = uploadId; + } + + public Builder position(int position) { + this.position = position; + return this; + } + + public Builder buffer(byte[] buffer) { + this.buffer = buffer; + return this; + } + + public Builder isOpen(boolean isOpen) { + this.isOpen = isOpen; + return this; + } + + public Builder chunkSize(int chunkSize) { + this.chunkSize = chunkSize; + return this; + } + + public abstract RestorableState build(); + } + + @Override + public int hashCode() { + return Objects.hash(serviceOptions, entity, uploadId, position, isOpen, chunkSize, + Arrays.hashCode(buffer)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof BaseState)) { + return false; + } + final BaseState other = (BaseState) obj; + return Objects.equals(this.serviceOptions, other.serviceOptions) + && Objects.equals(this.entity, other.entity) + && Objects.equals(this.uploadId, other.uploadId) + && Objects.deepEquals(this.buffer, other.buffer) + && this.position == other.position + && this.isOpen == other.isOpen + && this.chunkSize == other.chunkSize; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("entity", entity) + .add("uploadId", uploadId) + .add("position", position) + .add("isOpen", isOpen) + .toString(); + } + } +} diff --git a/gcloud-java-core/src/main/java/com/google/gcloud/ReadChannel.java b/gcloud-java-core/src/main/java/com/google/gcloud/ReadChannel.java new file mode 100644 index 000000000000..7537c5a8ce0b --- /dev/null +++ b/gcloud-java-core/src/main/java/com/google/gcloud/ReadChannel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
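// A usage sketch for the ReadChannel interface declared just below; the channel
// would typically come from blob.reader() or storage.reader(blobId), and the
// offset and buffer sizes here are illustrative.
import com.google.gcloud.ReadChannel;

import java.io.IOException;
import java.nio.ByteBuffer;

public class ReadChannelSketch {
  static void readFrom(ReadChannel reader, int offset) throws IOException {
    reader.seek(offset);         // start reading at the given byte offset
    reader.chunkSize(64 * 1024); // fetch at least 64 KB per RPC
    ByteBuffer bytes = ByteBuffer.allocate(8 * 1024);
    while (reader.read(bytes) > 0) {
      bytes.flip();
      // consume bytes.remaining() bytes here
      bytes.clear();
    }
    reader.close();
  }
}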
+ */ + +package com.google.gcloud; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.channels.ReadableByteChannel; + +/** + * A channel for reading data from a Google Cloud object. + * + *
<p>Implementations of this class may buffer data internally to reduce remote calls. This + * interface implements {@link Restorable} to allow saving the reader's state to continue reading + * afterwards. + * </p>
+ */ +public interface ReadChannel extends ReadableByteChannel, Closeable, Restorable { + + /** + * Overridden to remove IOException. + * + * @see java.nio.channels.Channel#close() + */ + @Override + void close(); + + void seek(int position) throws IOException; + + /** + * Sets the minimum size that will be read by a single RPC. + * Read data will be locally buffered until consumed. + */ + void chunkSize(int chunkSize); + + /** + * Captures the read channel state so that it can be saved and restored afterwards. + * + * @return a {@link RestorableState} object that contains the read channel state and can restore + * it afterwards. + */ + @Override + RestorableState capture(); +} diff --git a/gcloud-java-core/src/main/java/com/google/gcloud/WriteChannel.java b/gcloud-java-core/src/main/java/com/google/gcloud/WriteChannel.java new file mode 100644 index 000000000000..e6f06e23dc04 --- /dev/null +++ b/gcloud-java-core/src/main/java/com/google/gcloud/WriteChannel.java @@ -0,0 +1,48 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud; + +import java.io.Closeable; +import java.nio.channels.WritableByteChannel; + +/** + * A channel for writing data to Google Cloud services. + * + *
<p>Implementations of this class may further buffer data internally to reduce remote calls. + * Written data will only be visible after calling {@link #close()}. This interface implements + * {@link Restorable} to allow saving the writer's state to continue writing afterwards. + * </p>
+ */ +public interface WriteChannel extends WritableByteChannel, Closeable, Restorable { + + /** + * Sets the minimum size that will be written by a single RPC. + * Written data will be buffered and only flushed upon reaching this size or closing the channel. + */ + void chunkSize(int chunkSize); + + /** + * Captures the write channel state so that it can be saved and restored afterwards. The original + * {@code WriteChannel} and the restored one should not both be used. Closing one channel + * causes the other channel to close; subsequent writes will fail. + * + * @return a {@link RestorableState} object that contains the write channel state and can restore + * it afterwards. + */ + @Override + RestorableState capture(); +} diff --git a/gcloud-java-core/src/test/java/com/google/gcloud/BaseWriteChannelTest.java b/gcloud-java-core/src/test/java/com/google/gcloud/BaseWriteChannelTest.java new file mode 100644 index 000000000000..d80db3c5a461 --- /dev/null +++ b/gcloud-java-core/src/test/java/com/google/gcloud/BaseWriteChannelTest.java @@ -0,0 +1,138 @@ +/* + * Copyright 2015 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.gcloud; + +import static junit.framework.TestCase.assertFalse; +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.gcloud.spi.ServiceRpcFactory; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Random; + +public class BaseWriteChannelTest { + + private abstract static class CustomService implements Service {} + private abstract static class CustomServiceOptions + extends ServiceOptions { + + private static final long serialVersionUID = 3302358029307467197L; + + protected CustomServiceOptions( + Class> serviceFactoryClass, + Class> rpcFactoryClass, + Builder builder) { + super(serviceFactoryClass, rpcFactoryClass, builder); + } + } + + private static final Serializable ENTITY = 42L; + private static final String UPLOAD_ID = "uploadId"; + private static final byte[] CONTENT = {0xD, 0xE, 0xA, 0xD}; + private static final int MIN_CHUNK_SIZE = 256 * 1024; + private static final int DEFAULT_CHUNK_SIZE = 8 * MIN_CHUNK_SIZE; + private static final Random RANDOM = new Random(); + private static BaseWriteChannel channel; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void setUp() { + channel = new BaseWriteChannel(null, ENTITY, UPLOAD_ID) { + @Override + public RestorableState capture() { + return null; + } + + @Override + protected void write(int length, boolean last) {} + }; + } + + @Test + public void testConstructor() throws IOException { + assertEquals(null, channel.options); + assertEquals(ENTITY, 
channel.entity); + assertEquals(UPLOAD_ID, channel.uploadId); + assertTrue(channel.isOpen()); + assertArrayEquals(new byte[0], channel.buffer); + assertEquals(DEFAULT_CHUNK_SIZE, channel.chunkSize); + } + + @Test + public void testClose() throws IOException { + channel.close(); + assertFalse(channel.isOpen()); + assertNull(channel.buffer); + } + + @Test + public void testValidateOpen() throws IOException { + channel.validateOpen(); + channel.close(); + thrown.expect(IOException.class); + thrown.expectMessage("stream is closed"); + channel.validateOpen(); + } + + @Test + public void testChunkSize() throws IOException { + channel.chunkSize(42); + assertEquals(MIN_CHUNK_SIZE, channel.chunkSize); + channel.chunkSize(2 * MIN_CHUNK_SIZE); + assertEquals(2 * MIN_CHUNK_SIZE, channel.chunkSize); + channel.chunkSize(512 * 1025); + assertEquals(2 * MIN_CHUNK_SIZE, channel.chunkSize); + } + + @Test + public void testWrite() throws IOException { + channel.write(ByteBuffer.wrap(CONTENT)); + assertEquals(CONTENT.length, channel.limit); + assertEquals(DEFAULT_CHUNK_SIZE, channel.buffer.length); + assertArrayEquals(Arrays.copyOf(CONTENT, DEFAULT_CHUNK_SIZE), channel.buffer); + } + + @Test + public void testWriteAndFlush() throws IOException { + ByteBuffer content = randomBuffer(DEFAULT_CHUNK_SIZE + 1); + channel.write(content); + assertEquals(DEFAULT_CHUNK_SIZE, channel.position); + assertEquals(1, channel.limit); + byte[] newContent = new byte[DEFAULT_CHUNK_SIZE]; + newContent[0] = content.get(DEFAULT_CHUNK_SIZE); + assertArrayEquals(newContent, channel.buffer); + } + + private static ByteBuffer randomBuffer(int size) { + byte[] byteArray = new byte[size]; + RANDOM.nextBytes(byteArray); + return ByteBuffer.wrap(byteArray); + } +} diff --git a/gcloud-java-examples/src/main/java/com/google/gcloud/examples/BigQueryExample.java b/gcloud-java-examples/src/main/java/com/google/gcloud/examples/BigQueryExample.java index 1754be4df7dc..2f8a768f3669 100644 --- a/gcloud-java-examples/src/main/java/com/google/gcloud/examples/BigQueryExample.java +++ b/gcloud-java-examples/src/main/java/com/google/gcloud/examples/BigQueryExample.java @@ -17,6 +17,7 @@ package com.google.gcloud.examples; import com.google.common.collect.ImmutableMap; +import com.google.gcloud.WriteChannel; import com.google.gcloud.bigquery.BaseTableInfo; import com.google.gcloud.bigquery.BigQuery; import com.google.gcloud.bigquery.BigQueryError; @@ -33,6 +34,7 @@ import com.google.gcloud.bigquery.JobId; import com.google.gcloud.bigquery.JobInfo; import com.google.gcloud.bigquery.JobStatus; +import com.google.gcloud.bigquery.LoadConfiguration; import com.google.gcloud.bigquery.LoadJobInfo; import com.google.gcloud.bigquery.QueryRequest; import com.google.gcloud.bigquery.QueryResponse; @@ -42,6 +44,9 @@ import com.google.gcloud.bigquery.ViewInfo; import com.google.gcloud.spi.BigQueryRpc.Tuple; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Paths; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; @@ -77,7 +82,8 @@ * copy | * load
<dataset> <table> <format> <sourceUri>+ | * extract <dataset> <table> <format> <destinationUri>+ | - * query <query>"} + * query <query> | + * load-file <dataset> <table> <format> <filePath>
"} * * * @@ -523,7 +529,7 @@ void run(BigQuery bigquery, JobInfo job) throws Exception { startedJob = bigquery.getJob(startedJob.jobId()); } if (startedJob.status().error() == null) { - System.out.println("Job " + startedJob.jobId().job() + " suceeded"); + System.out.println("Job " + startedJob.jobId().job() + " succeeded"); } else { System.out.println("Job " + startedJob.jobId().job() + " failed"); System.out.println("Error: " + startedJob.status().error()); @@ -544,8 +550,8 @@ LoadJobInfo parse(String... args) throws Exception { String table = args[1]; String format = args[2]; TableId tableId = TableId.of(dataset, table); - return LoadJobInfo.builder(tableId, Arrays.asList(args).subList(3, args.length)) - .formatOptions(FormatOptions.of(format)) + LoadConfiguration configuration = LoadConfiguration.of(tableId, FormatOptions.of(format)); + return LoadJobInfo.builder(configuration, Arrays.asList(args).subList(3, args.length)) .build(); } throw new IllegalArgumentException("Missing required arguments."); @@ -659,6 +665,47 @@ protected String params() { } } + /** + * This class demonstrates how to load data into a BigQuery Table from a local file. + * + * @see Resumable + * Upload + */ + private static class LoadFileAction extends BigQueryAction> { + @Override + void run(BigQuery bigquery, Tuple configuration) throws Exception { + System.out.println("Running insert"); + try (FileChannel fileChannel = FileChannel.open(Paths.get(configuration.y()))) { + ByteBuffer buffer = ByteBuffer.allocate(256 * 1024); + WriteChannel writeChannel = bigquery.writer(configuration.x()); + while (fileChannel.read(buffer) > 0) { + buffer.flip(); + writeChannel.write(buffer); + buffer.clear(); + } + writeChannel.close(); + } + } + + @Override + Tuple parse(String... args) throws Exception { + if (args.length == 4) { + String dataset = args[0]; + String table = args[1]; + String format = args[2]; + TableId tableId = TableId.of(dataset, table); + LoadConfiguration configuration = LoadConfiguration.of(tableId, FormatOptions.of(format)); + return Tuple.of(configuration, args[3]); + } + throw new IllegalArgumentException("Missing required arguments."); + } + + @Override + protected String params() { + return "
"; + } + } + static { CREATE_ACTIONS.put("dataset", new CreateDatasetAction()); CREATE_ACTIONS.put("table", new CreateSimpleTableAction()); @@ -682,6 +729,7 @@ protected String params() { ACTIONS.put("extract", new ExtractAction()); ACTIONS.put("copy", new CopyAction()); ACTIONS.put("query", new QueryAction()); + ACTIONS.put("load-file", new LoadFileAction()); } private static void printUsage() { diff --git a/gcloud-java-examples/src/main/java/com/google/gcloud/examples/StorageExample.java b/gcloud-java-examples/src/main/java/com/google/gcloud/examples/StorageExample.java index dc3dba6c72ab..e3bee626f49c 100644 --- a/gcloud-java-examples/src/main/java/com/google/gcloud/examples/StorageExample.java +++ b/gcloud-java-examples/src/main/java/com/google/gcloud/examples/StorageExample.java @@ -18,12 +18,12 @@ import com.google.gcloud.AuthCredentials; import com.google.gcloud.AuthCredentials.ServiceAccountAuthCredentials; +import com.google.gcloud.ReadChannel; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc.Tuple; import com.google.gcloud.storage.Blob; import com.google.gcloud.storage.BlobId; import com.google.gcloud.storage.BlobInfo; -import com.google.gcloud.storage.BlobReadChannel; -import com.google.gcloud.storage.BlobWriteChannel; import com.google.gcloud.storage.Bucket; import com.google.gcloud.storage.BucketInfo; import com.google.gcloud.storage.CopyWriter; @@ -258,7 +258,7 @@ private void run(Storage storage, Path uploadFrom, BlobInfo blobInfo) throws IOE // When content is not available or large (1MB or more) it is recommended // to write it in chunks via the blob's channel writer. Blob blob = new Blob(storage, blobInfo); - try (BlobWriteChannel writer = blob.writer()) { + try (WriteChannel writer = blob.writer()) { byte[] buffer = new byte[1024]; try (InputStream input = Files.newInputStream(uploadFrom)) { int limit; @@ -326,7 +326,7 @@ private void run(Storage storage, BlobId blobId, Path downloadTo) throws IOExcep writeTo.write(content); } else { // When Blob size is big or unknown use the blob's channel reader. - try (BlobReadChannel reader = blob.reader()) { + try (ReadChannel reader = blob.reader()) { WritableByteChannel channel = Channels.newChannel(writeTo); ByteBuffer bytes = ByteBuffer.allocate(64 * 1024); while (reader.read(bytes) > 0) { diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Blob.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Blob.java index 5b305d15cee4..fe65f6ee010b 100644 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Blob.java +++ b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Blob.java @@ -24,6 +24,8 @@ import com.google.common.base.Function; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.gcloud.ReadChannel; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.storage.Storage.BlobTargetOption; import com.google.gcloud.storage.Storage.BlobWriteOption; @@ -321,24 +323,24 @@ public CopyWriter copyTo(String targetBucket, String targetBlob, BlobSourceOptio } /** - * Returns a {@code BlobReadChannel} object for reading this blob's content. + * Returns a {@code ReadChannel} object for reading this blob's content. * * @param options blob read options * @throws StorageException upon failure */ - public BlobReadChannel reader(BlobSourceOption... options) { + public ReadChannel reader(BlobSourceOption... 
options) { return storage.reader(info.blobId(), toSourceOptions(info, options)); } /** - * Returns a {@code BlobWriteChannel} object for writing to this blob. By default any md5 and + * Returns a {@code WriteChannel} object for writing to this blob. By default any md5 and * crc32c values in the current blob are ignored unless requested via the * {@code BlobWriteOption.md5Match} and {@code BlobWriteOption.crc32cMatch} options. * * @param options target blob options * @throws StorageException upon failure */ - public BlobWriteChannel writer(BlobWriteOption... options) { + public WriteChannel writer(BlobWriteOption... options) { return storage.writer(info, options); } diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannel.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannel.java index 106d18466dac..984f5d1f72e9 100644 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannel.java +++ b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannel.java @@ -16,46 +16,264 @@ package com.google.gcloud.storage; -import com.google.gcloud.Restorable; +import static com.google.gcloud.RetryHelper.runWithRetries; + +import com.google.api.services.storage.model.StorageObject; +import com.google.common.base.MoreObjects; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RestorableState; +import com.google.gcloud.RetryHelper; +import com.google.gcloud.spi.StorageRpc; +import com.google.gcloud.spi.StorageRpc.Tuple; -import java.io.Closeable; import java.io.IOException; -import java.nio.channels.ReadableByteChannel; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.Callable; /** - * A channel for reading data from a Google Cloud Storage object. - * - *
<p>Implementations of this class may buffer data internally to reduce remote calls. This - * interface implements {@link Restorable} to allow saving the reader's state to continue reading - * afterwards. - * </p>
+ * Default implementation for ReadChannel. */ -public interface BlobReadChannel extends ReadableByteChannel, Closeable, - Restorable { - - /** - * Overridden to remove IOException. - * - * @see java.nio.channels.Channel#close() - */ +class BlobReadChannel implements ReadChannel { + + private static final int DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024; + + private final StorageOptions serviceOptions; + private final BlobId blob; + private final Map requestOptions; + private String lastEtag; + private int position; + private boolean isOpen; + private boolean endOfStream; + private int chunkSize = DEFAULT_CHUNK_SIZE; + + private final StorageRpc storageRpc; + private final StorageObject storageObject; + private int bufferPos; + private byte[] buffer; + + BlobReadChannel(StorageOptions serviceOptions, BlobId blob, + Map requestOptions) { + this.serviceOptions = serviceOptions; + this.blob = blob; + this.requestOptions = requestOptions; + isOpen = true; + storageRpc = serviceOptions.rpc(); + storageObject = blob.toPb(); + } + + @Override + public RestorableState capture() { + StateImpl.Builder builder = StateImpl.builder(serviceOptions, blob, requestOptions) + .position(position) + .isOpen(isOpen) + .endOfStream(endOfStream) + .chunkSize(chunkSize); + if (buffer != null) { + builder.position(position + bufferPos); + builder.endOfStream(false); + } + return builder.build(); + } + @Override - void close(); - - void seek(int position) throws IOException; - - /** - * Sets the minimum size that will be read by a single RPC. - * Read data will be locally buffered until consumed. - */ - void chunkSize(int chunkSize); - - /** - * Captures the read channel state so that it can be saved and restored afterwards. - * - * @return a {@link RestorableState} object that contains the read channel state and can restore - * it afterwards. - */ + public boolean isOpen() { + return isOpen; + } + + @Override + public void close() { + if (isOpen) { + buffer = null; + isOpen = false; + } + } + + private void validateOpen() throws IOException { + if (!isOpen) { + throw new IOException("stream is closed"); + } + } + + @Override + public void seek(int position) throws IOException { + validateOpen(); + this.position = position; + buffer = null; + bufferPos = 0; + endOfStream = false; + } + + @Override + public void chunkSize(int chunkSize) { + this.chunkSize = chunkSize <= 0 ? 
DEFAULT_CHUNK_SIZE : chunkSize; + } + @Override - RestorableState capture(); + public int read(ByteBuffer byteBuffer) throws IOException { + validateOpen(); + if (buffer == null) { + if (endOfStream) { + return -1; + } + final int toRead = Math.max(byteBuffer.remaining(), chunkSize); + try { + Tuple result = runWithRetries(new Callable>() { + @Override + public Tuple call() { + return storageRpc.read(storageObject, requestOptions, position, toRead); + } + }, serviceOptions.retryParams(), StorageImpl.EXCEPTION_HANDLER); + if (lastEtag != null && !Objects.equals(result.x(), lastEtag)) { + StringBuilder messageBuilder = new StringBuilder(); + messageBuilder.append("Blob ").append(blob).append(" was updated while reading"); + throw new StorageException(0, messageBuilder.toString(), false); + } + lastEtag = result.x(); + buffer = result.y(); + } catch (RetryHelper.RetryHelperException e) { + throw StorageException.translateAndThrow(e); + } + if (toRead > buffer.length) { + endOfStream = true; + if (buffer.length == 0) { + buffer = null; + return -1; + } + } + } + int toWrite = Math.min(buffer.length - bufferPos, byteBuffer.remaining()); + byteBuffer.put(buffer, bufferPos, toWrite); + bufferPos += toWrite; + if (bufferPos >= buffer.length) { + position += buffer.length; + buffer = null; + bufferPos = 0; + } + return toWrite; + } + + static class StateImpl implements RestorableState, Serializable { + + private static final long serialVersionUID = 3889420316004453706L; + + private final StorageOptions serviceOptions; + private final BlobId blob; + private final Map requestOptions; + private final String lastEtag; + private final int position; + private final boolean isOpen; + private final boolean endOfStream; + private final int chunkSize; + + StateImpl(Builder builder) { + this.serviceOptions = builder.serviceOptions; + this.blob = builder.blob; + this.requestOptions = builder.requestOptions; + this.lastEtag = builder.lastEtag; + this.position = builder.position; + this.isOpen = builder.isOpen; + this.endOfStream = builder.endOfStream; + this.chunkSize = builder.chunkSize; + } + + static class Builder { + private final StorageOptions serviceOptions; + private final BlobId blob; + private final Map requestOptions; + private String lastEtag; + private int position; + private boolean isOpen; + private boolean endOfStream; + private int chunkSize; + + private Builder(StorageOptions options, BlobId blob, Map reqOptions) { + this.serviceOptions = options; + this.blob = blob; + this.requestOptions = reqOptions; + } + + Builder lastEtag(String lastEtag) { + this.lastEtag = lastEtag; + return this; + } + + Builder position(int position) { + this.position = position; + return this; + } + + Builder isOpen(boolean isOpen) { + this.isOpen = isOpen; + return this; + } + + Builder endOfStream(boolean endOfStream) { + this.endOfStream = endOfStream; + return this; + } + + Builder chunkSize(int chunkSize) { + this.chunkSize = chunkSize; + return this; + } + + RestorableState build() { + return new StateImpl(this); + } + } + + static Builder builder( + StorageOptions options, BlobId blob, Map reqOptions) { + return new Builder(options, blob, reqOptions); + } + + @Override + public ReadChannel restore() { + BlobReadChannel channel = new BlobReadChannel(serviceOptions, blob, requestOptions); + channel.lastEtag = lastEtag; + channel.position = position; + channel.isOpen = isOpen; + channel.endOfStream = endOfStream; + channel.chunkSize = chunkSize; + return channel; + } + + @Override + public int hashCode() { + 
return Objects.hash(serviceOptions, blob, requestOptions, lastEtag, position, isOpen, + endOfStream, chunkSize); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof StateImpl)) { + return false; + } + final StateImpl other = (StateImpl) obj; + return Objects.equals(this.serviceOptions, other.serviceOptions) + && Objects.equals(this.blob, other.blob) + && Objects.equals(this.requestOptions, other.requestOptions) + && Objects.equals(this.lastEtag, other.lastEtag) + && this.position == other.position + && this.isOpen == other.isOpen + && this.endOfStream == other.endOfStream + && this.chunkSize == other.chunkSize; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("blob", blob) + .add("position", position) + .add("isOpen", isOpen) + .add("endOfStream", endOfStream) + .toString(); + } + } } diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannelImpl.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannelImpl.java deleted file mode 100644 index 8fe6eae66d8f..000000000000 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobReadChannelImpl.java +++ /dev/null @@ -1,278 +0,0 @@ -/* - * Copyright 2015 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.gcloud.storage; - -import static com.google.gcloud.RetryHelper.runWithRetries; - -import com.google.api.services.storage.model.StorageObject; -import com.google.common.base.MoreObjects; -import com.google.gcloud.RestorableState; -import com.google.gcloud.RetryHelper; -import com.google.gcloud.spi.StorageRpc; -import com.google.gcloud.spi.StorageRpc.Tuple; - -import java.io.IOException; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.Callable; - -/** - * Default implementation for BlobReadChannel. 
- */ -class BlobReadChannelImpl implements BlobReadChannel { - - private static final int DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024; - - private final StorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private String lastEtag; - private int position; - private boolean isOpen; - private boolean endOfStream; - private int chunkSize = DEFAULT_CHUNK_SIZE; - - private final StorageRpc storageRpc; - private final StorageObject storageObject; - private int bufferPos; - private byte[] buffer; - - BlobReadChannelImpl(StorageOptions serviceOptions, BlobId blob, - Map requestOptions) { - this.serviceOptions = serviceOptions; - this.blob = blob; - this.requestOptions = requestOptions; - isOpen = true; - storageRpc = serviceOptions.rpc(); - storageObject = blob.toPb(); - } - - @Override - public RestorableState capture() { - StateImpl.Builder builder = StateImpl.builder(serviceOptions, blob, requestOptions) - .position(position) - .isOpen(isOpen) - .endOfStream(endOfStream) - .chunkSize(chunkSize); - if (buffer != null) { - builder.position(position + bufferPos); - builder.endOfStream(false); - } - return builder.build(); - } - - @Override - public boolean isOpen() { - return isOpen; - } - - @Override - public void close() { - if (isOpen) { - buffer = null; - isOpen = false; - } - } - - private void validateOpen() throws IOException { - if (!isOpen) { - throw new IOException("stream is closed"); - } - } - - @Override - public void seek(int position) throws IOException { - validateOpen(); - this.position = position; - buffer = null; - bufferPos = 0; - endOfStream = false; - } - - @Override - public void chunkSize(int chunkSize) { - this.chunkSize = chunkSize <= 0 ? DEFAULT_CHUNK_SIZE : chunkSize; - } - - @Override - public int read(ByteBuffer byteBuffer) throws IOException { - validateOpen(); - if (buffer == null) { - if (endOfStream) { - return -1; - } - final int toRead = Math.max(byteBuffer.remaining(), chunkSize); - try { - Tuple result = runWithRetries(new Callable>() { - @Override - public Tuple call() { - return storageRpc.read(storageObject, requestOptions, position, toRead); - } - }, serviceOptions.retryParams(), StorageImpl.EXCEPTION_HANDLER); - if (lastEtag != null && !Objects.equals(result.x(), lastEtag)) { - StringBuilder messageBuilder = new StringBuilder(); - messageBuilder.append("Blob ").append(blob).append(" was updated while reading"); - throw new StorageException(0, messageBuilder.toString(), false); - } - lastEtag = result.x(); - buffer = result.y(); - } catch (RetryHelper.RetryHelperException e) { - throw StorageException.translateAndThrow(e); - } - if (toRead > buffer.length) { - endOfStream = true; - if (buffer.length == 0) { - buffer = null; - return -1; - } - } - } - int toWrite = Math.min(buffer.length - bufferPos, byteBuffer.remaining()); - byteBuffer.put(buffer, bufferPos, toWrite); - bufferPos += toWrite; - if (bufferPos >= buffer.length) { - position += buffer.length; - buffer = null; - bufferPos = 0; - } - return toWrite; - } - - static class StateImpl implements RestorableState, Serializable { - - private static final long serialVersionUID = 3889420316004453706L; - - private final StorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private final String lastEtag; - private final int position; - private final boolean isOpen; - private final boolean endOfStream; - private final int chunkSize; - - StateImpl(Builder builder) { - this.serviceOptions = builder.serviceOptions; - this.blob = 
builder.blob; - this.requestOptions = builder.requestOptions; - this.lastEtag = builder.lastEtag; - this.position = builder.position; - this.isOpen = builder.isOpen; - this.endOfStream = builder.endOfStream; - this.chunkSize = builder.chunkSize; - } - - static class Builder { - private final StorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private String lastEtag; - private int position; - private boolean isOpen; - private boolean endOfStream; - private int chunkSize; - - private Builder(StorageOptions options, BlobId blob, Map reqOptions) { - this.serviceOptions = options; - this.blob = blob; - this.requestOptions = reqOptions; - } - - Builder lastEtag(String lastEtag) { - this.lastEtag = lastEtag; - return this; - } - - Builder position(int position) { - this.position = position; - return this; - } - - Builder isOpen(boolean isOpen) { - this.isOpen = isOpen; - return this; - } - - Builder endOfStream(boolean endOfStream) { - this.endOfStream = endOfStream; - return this; - } - - Builder chunkSize(int chunkSize) { - this.chunkSize = chunkSize; - return this; - } - - RestorableState build() { - return new StateImpl(this); - } - } - - static Builder builder( - StorageOptions options, BlobId blob, Map reqOptions) { - return new Builder(options, blob, reqOptions); - } - - @Override - public BlobReadChannel restore() { - BlobReadChannelImpl channel = new BlobReadChannelImpl(serviceOptions, blob, requestOptions); - channel.lastEtag = lastEtag; - channel.position = position; - channel.isOpen = isOpen; - channel.endOfStream = endOfStream; - channel.chunkSize = chunkSize; - return channel; - } - - @Override - public int hashCode() { - return Objects.hash(serviceOptions, blob, requestOptions, lastEtag, position, isOpen, - endOfStream, chunkSize); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (!(obj instanceof StateImpl)) { - return false; - } - final StateImpl other = (StateImpl) obj; - return Objects.equals(this.serviceOptions, other.serviceOptions) - && Objects.equals(this.blob, other.blob) - && Objects.equals(this.requestOptions, other.requestOptions) - && Objects.equals(this.lastEtag, other.lastEtag) - && this.position == other.position - && this.isOpen == other.isOpen - && this.endOfStream == other.endOfStream - && this.chunkSize == other.chunkSize; - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("blob", blob) - .add("position", position) - .add("isOpen", isOpen) - .add("endOfStream", endOfStream) - .toString(); - } - } -} diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannel.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannel.java index 9682c6345659..1b518af19871 100644 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannel.java +++ b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannel.java @@ -16,37 +16,95 @@ package com.google.gcloud.storage; -import com.google.gcloud.Restorable; +import static com.google.gcloud.RetryHelper.runWithRetries; +import static java.util.concurrent.Executors.callable; + +import com.google.gcloud.BaseWriteChannel; import com.google.gcloud.RestorableState; +import com.google.gcloud.RetryHelper; +import com.google.gcloud.WriteChannel; +import com.google.gcloud.spi.StorageRpc; -import java.io.Closeable; -import java.nio.channels.WritableByteChannel; +import java.util.Arrays; +import java.util.Map; 
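
The read path above now hands callers the service-neutral ReadChannel. A minimal caller-side usage sketch, assuming placeholder project/bucket/blob names and eliding error handling:

import com.google.gcloud.ReadChannel;
import com.google.gcloud.storage.BlobId;
import com.google.gcloud.storage.Storage;
import com.google.gcloud.storage.StorageOptions;

import java.io.IOException;
import java.nio.ByteBuffer;

final class ReadChannelSketch {
  public static void main(String... args) throws IOException {
    Storage storage = StorageOptions.builder().projectId("my-project").build().service();
    ByteBuffer bytes = ByteBuffer.allocate(64 * 1024);
    // reader() now returns the service-neutral ReadChannel instead of BlobReadChannel.
    try (ReadChannel reader = storage.reader(BlobId.of("my-bucket", "my-blob"))) {
      reader.chunkSize(1024 * 1024); // fetch at most ~1 MB per RPC, buffered locally
      while (reader.read(bytes) >= 0) {
        bytes.flip();
        // consume bytes.remaining() bytes here ...
        bytes.clear();
      }
    }
  }
}

Because chunkSize only sets the per-RPC fetch size, small read() calls are served from the channel's local buffer until it drains.
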
/** - * A channel for writing data to a Google Cloud Storage object. - * - * <p>Implementations of this class may further buffer data internally to reduce remote calls. - * Written data will only be visible after calling {@link #close()}. This interface implements - * {@link Restorable} to allow saving the writer's state to continue writing afterwards. - * </p>
+ * Write channel implementation to upload Google Cloud Storage blobs. */ -public interface BlobWriteChannel extends WritableByteChannel, Closeable, - Restorable { - - /** - * Sets the minimum size that will be written by a single RPC. - * Written data will be buffered and only flushed upon reaching this size or closing the channel. - */ - void chunkSize(int chunkSize); - - /** - * Captures the write channel state so that it can be saved and restored afterwards. The original - * {@code BlobWriteChannel} and the restored one should not both be used. Closing one channel - * causes the other channel to close, subsequent writes will fail. - * - * @return a {@link RestorableState} object that contains the write channel state and can restore - * it afterwards. - */ +class BlobWriteChannel extends BaseWriteChannel { + + BlobWriteChannel(StorageOptions options, BlobInfo blob, Map optionsMap) { + this(options, blob, options.rpc().open(blob.toPb(), optionsMap)); + } + + BlobWriteChannel(StorageOptions options, BlobInfo blobInfo, String uploadId) { + super(options, blobInfo, uploadId); + } + + @Override + protected void write(final int length, final boolean last) { + try { + runWithRetries(callable(new Runnable() { + @Override + public void run() { + options.rpc().write(uploadId, buffer, 0, position, length, last); + } + }), options.retryParams(), StorageImpl.EXCEPTION_HANDLER); + } catch (RetryHelper.RetryHelperException e) { + throw StorageException.translateAndThrow(e); + } + } + @Override - RestorableState capture(); + public RestorableState capture() { + byte[] bufferToSave = null; + if (isOpen) { + flush(); + bufferToSave = Arrays.copyOf(buffer, limit); + } + return StateImpl.builder(options, entity, uploadId) + .position(position) + .buffer(bufferToSave) + .isOpen(isOpen) + .chunkSize(chunkSize) + .build(); + } + + static class StateImpl extends BaseWriteChannel.BaseState { + + private static final long serialVersionUID = -9028324143780151286L; + + StateImpl(Builder builder) { + super(builder); + } + + static class Builder extends BaseWriteChannel.BaseState.Builder { + + private Builder(StorageOptions options, BlobInfo blobInfo, String uploadId) { + super(options, blobInfo, uploadId); + } + + @Override + public RestorableState build() { + return new StateImpl(this); + } + } + + static Builder builder(StorageOptions options, BlobInfo blobInfo, String uploadId) { + return new Builder(options, blobInfo, uploadId); + } + + @Override + public WriteChannel restore() { + BlobWriteChannel channel = new BlobWriteChannel(serviceOptions, entity, uploadId); + if (buffer != null) { + channel.buffer = buffer.clone(); + channel.limit = buffer.length; + } + channel.position = position; + channel.isOpen = isOpen; + channel.chunkSize = chunkSize; + return channel; + } + } } diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannelImpl.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannelImpl.java deleted file mode 100644 index acde4178533c..000000000000 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/BlobWriteChannelImpl.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright 2015 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.gcloud.storage; - -import static com.google.gcloud.RetryHelper.runWithRetries; -import static java.util.concurrent.Executors.callable; - -import com.google.api.services.storage.model.StorageObject; -import com.google.common.base.MoreObjects; -import com.google.gcloud.RestorableState; -import com.google.gcloud.RetryHelper; -import com.google.gcloud.spi.StorageRpc; - -import java.io.IOException; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.Map; -import java.util.Objects; - -/** - * Default implementation for BlobWriteChannel. - */ -class BlobWriteChannelImpl implements BlobWriteChannel { - - private static final int MIN_CHUNK_SIZE = 256 * 1024; - private static final int DEFAULT_CHUNK_SIZE = 8 * MIN_CHUNK_SIZE; - - private final StorageOptions options; - private final BlobInfo blobInfo; - private final String uploadId; - private int position; - private byte[] buffer = new byte[0]; - private int limit; - private boolean isOpen = true; - private int chunkSize = DEFAULT_CHUNK_SIZE; - - private final StorageRpc storageRpc; - private final StorageObject storageObject; - - BlobWriteChannelImpl(StorageOptions options, BlobInfo blobInfo, - Map optionsMap) { - this.options = options; - this.blobInfo = blobInfo; - storageRpc = options.rpc(); - storageObject = blobInfo.toPb(); - uploadId = storageRpc.open(storageObject, optionsMap); - } - - BlobWriteChannelImpl(StorageOptions options, BlobInfo blobInfo, String uploadId) { - this.options = options; - this.blobInfo = blobInfo; - this.uploadId = uploadId; - storageRpc = options.rpc(); - storageObject = blobInfo.toPb(); - } - - @Override - public RestorableState capture() { - byte[] bufferToSave = null; - if (isOpen) { - flush(); - bufferToSave = Arrays.copyOf(buffer, limit); - } - return StateImpl.builder(options, blobInfo, uploadId) - .position(position) - .buffer(bufferToSave) - .isOpen(isOpen) - .chunkSize(chunkSize) - .build(); - } - - private void flush() { - if (limit >= chunkSize) { - final int length = limit - limit % MIN_CHUNK_SIZE; - try { - runWithRetries(callable(new Runnable() { - @Override - public void run() { - storageRpc.write(uploadId, buffer, 0, position, length, false); - } - }), options.retryParams(), StorageImpl.EXCEPTION_HANDLER); - } catch (RetryHelper.RetryHelperException e) { - throw StorageException.translateAndThrow(e); - } - position += length; - limit -= length; - byte[] temp = new byte[chunkSize]; - System.arraycopy(buffer, length, temp, 0, limit); - buffer = temp; - } - } - - private void validateOpen() throws IOException { - if (!isOpen) { - throw new IOException("stream is closed"); - } - } - - @Override - public int write(ByteBuffer byteBuffer) throws IOException { - validateOpen(); - int toWrite = byteBuffer.remaining(); - int spaceInBuffer = buffer.length - limit; - if (spaceInBuffer >= toWrite) { - byteBuffer.get(buffer, limit, toWrite); - } else { - buffer = Arrays.copyOf(buffer, Math.max(chunkSize, buffer.length + toWrite - spaceInBuffer)); - byteBuffer.get(buffer, limit, toWrite); - } - limit += toWrite; - flush(); 
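// Note on the flush() call above: data is uploaded only once the buffer holds at
// least chunkSize bytes, and then only limit - limit % MIN_CHUNK_SIZE of them, so
// every non-final RPC writes a whole multiple of MIN_CHUNK_SIZE (256 KB); the
// remainder is copied to the front of a fresh buffer and waits for the next
// write() or close().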
- return toWrite; - } - - @Override - public boolean isOpen() { - return isOpen; - } - - @Override - public void close() throws IOException { - if (isOpen) { - try { - runWithRetries(callable(new Runnable() { - @Override - public void run() { - storageRpc.write(uploadId, buffer, 0, position, limit, true); - } - }), options.retryParams(), StorageImpl.EXCEPTION_HANDLER); - } catch (RetryHelper.RetryHelperException e) { - throw StorageException.translateAndThrow(e); - } - position += buffer.length; - isOpen = false; - buffer = null; - } - } - - @Override - public void chunkSize(int chunkSize) { - chunkSize = (chunkSize / MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE; - this.chunkSize = Math.max(MIN_CHUNK_SIZE, chunkSize); - } - - static class StateImpl implements RestorableState, Serializable { - - private static final long serialVersionUID = 8541062465055125619L; - - private final StorageOptions serviceOptions; - private final BlobInfo blobInfo; - private final String uploadId; - private final int position; - private final byte[] buffer; - private final boolean isOpen; - private final int chunkSize; - - StateImpl(Builder builder) { - this.serviceOptions = builder.serviceOptions; - this.blobInfo = builder.blobInfo; - this.uploadId = builder.uploadId; - this.position = builder.position; - this.buffer = builder.buffer; - this.isOpen = builder.isOpen; - this.chunkSize = builder.chunkSize; - } - - static class Builder { - private final StorageOptions serviceOptions; - private final BlobInfo blobInfo; - private final String uploadId; - private int position; - private byte[] buffer; - private boolean isOpen; - private int chunkSize; - - private Builder(StorageOptions options, BlobInfo blobInfo, String uploadId) { - this.serviceOptions = options; - this.blobInfo = blobInfo; - this.uploadId = uploadId; - } - - Builder position(int position) { - this.position = position; - return this; - } - - Builder buffer(byte[] buffer) { - this.buffer = buffer; - return this; - } - - Builder isOpen(boolean isOpen) { - this.isOpen = isOpen; - return this; - } - - Builder chunkSize(int chunkSize) { - this.chunkSize = chunkSize; - return this; - } - - RestorableState build() { - return new StateImpl(this); - } - } - - static Builder builder(StorageOptions options, BlobInfo blobInfo, String uploadId) { - return new Builder(options, blobInfo, uploadId); - } - - @Override - public BlobWriteChannel restore() { - BlobWriteChannelImpl channel = new BlobWriteChannelImpl(serviceOptions, blobInfo, uploadId); - if (buffer != null) { - channel.buffer = buffer.clone(); - channel.limit = buffer.length; - } - channel.position = position; - channel.isOpen = isOpen; - channel.chunkSize = chunkSize; - return channel; - } - - @Override - public int hashCode() { - return Objects.hash(serviceOptions, blobInfo, uploadId, position, isOpen, chunkSize, - Arrays.hashCode(buffer)); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (!(obj instanceof StateImpl)) { - return false; - } - final StateImpl other = (StateImpl) obj; - return Objects.equals(this.serviceOptions, other.serviceOptions) - && Objects.equals(this.blobInfo, other.blobInfo) - && Objects.equals(this.uploadId, other.uploadId) - && Objects.deepEquals(this.buffer, other.buffer) - && this.position == other.position - && this.isOpen == other.isOpen - && this.chunkSize == other.chunkSize; - } - - @Override - public String toString() { - return MoreObjects.toStringHelper(this) - .add("blobInfo", blobInfo) - .add("uploadId", uploadId) - 
.add("position", position) - .add("isOpen", isOpen) - .toString(); - } - } -} diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Storage.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Storage.java index 85e79a8e9abf..f8c90ff42930 100644 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Storage.java +++ b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/Storage.java @@ -26,7 +26,9 @@ import com.google.common.collect.Sets; import com.google.gcloud.AuthCredentials.ServiceAccountAuthCredentials; import com.google.gcloud.Page; +import com.google.gcloud.ReadChannel; import com.google.gcloud.Service; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.spi.StorageRpc.Tuple; @@ -1423,7 +1425,7 @@ private static void checkContentType(BlobInfo blobInfo) throws IllegalArgumentEx * * @throws StorageException upon failure */ - BlobReadChannel reader(String bucket, String blob, BlobSourceOption... options); + ReadChannel reader(String bucket, String blob, BlobSourceOption... options); /** * Return a channel for reading the blob's content. If {@code blob.generation()} is set @@ -1439,7 +1441,7 @@ private static void checkContentType(BlobInfo blobInfo) throws IllegalArgumentEx * * @throws StorageException upon failure */ - BlobReadChannel reader(BlobId blob, BlobSourceOption... options); + ReadChannel reader(BlobId blob, BlobSourceOption... options); /** * Create a blob and return a channel for writing its content. By default any md5 and crc32c @@ -1448,7 +1450,7 @@ private static void checkContentType(BlobInfo blobInfo) throws IllegalArgumentEx * * @throws StorageException upon failure */ - BlobWriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options); + WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... options); /** * Generates a signed URL for a blob. diff --git a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/StorageImpl.java b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/StorageImpl.java index 93fc202febef..a6c851d0f638 100644 --- a/gcloud-java-storage/src/main/java/com/google/gcloud/storage/StorageImpl.java +++ b/gcloud-java-storage/src/main/java/com/google/gcloud/storage/StorageImpl.java @@ -49,6 +49,7 @@ import com.google.gcloud.Page; import com.google.gcloud.PageImpl; import com.google.gcloud.PageImpl.NextPageFetcher; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RetryHelper.RetryHelperException; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.spi.StorageRpc.RewriteResponse; @@ -517,15 +518,15 @@ private List> transformBatch } @Override - public BlobReadChannel reader(String bucket, String blob, BlobSourceOption... options) { + public ReadChannel reader(String bucket, String blob, BlobSourceOption... options) { Map optionsMap = optionMap(options); - return new BlobReadChannelImpl(options(), BlobId.of(bucket, blob), optionsMap); + return new BlobReadChannel(options(), BlobId.of(bucket, blob), optionsMap); } @Override - public BlobReadChannel reader(BlobId blob, BlobSourceOption... options) { + public ReadChannel reader(BlobId blob, BlobSourceOption... options) { Map optionsMap = optionMap(blob, options); - return new BlobReadChannelImpl(options(), blob, optionsMap); + return new BlobReadChannel(options(), blob, optionsMap); } @Override @@ -536,7 +537,7 @@ public BlobWriteChannel writer(BlobInfo blobInfo, BlobWriteOption... 
options) { private BlobWriteChannel writer(BlobInfo blobInfo, BlobTargetOption... options) { final Map optionsMap = optionMap(blobInfo, options); - return new BlobWriteChannelImpl(options(), blobInfo, optionsMap); + return new BlobWriteChannel(options(), blobInfo, optionsMap); } @Override diff --git a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelImplTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelTest.java similarity index 86% rename from gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelImplTest.java rename to gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelTest.java index 7daf4a6fb468..ffb37e8c5032 100644 --- a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelImplTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobReadChannelTest.java @@ -27,6 +27,7 @@ import static org.junit.Assert.fail; import com.google.common.collect.ImmutableMap; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RestorableState; import com.google.gcloud.RetryParams; import com.google.gcloud.spi.StorageRpc; @@ -42,7 +43,7 @@ import java.util.Map; import java.util.Random; -public class BlobReadChannelImplTest { +public class BlobReadChannelTest { private static final String BUCKET_NAME = "b"; private static final String BLOB_NAME = "n"; @@ -55,7 +56,7 @@ public class BlobReadChannelImplTest { private StorageOptions options; private StorageRpcFactory rpcFactoryMock; private StorageRpc storageRpcMock; - private BlobReadChannelImpl reader; + private BlobReadChannel reader; @Before public void setUp() { @@ -78,13 +79,13 @@ public void tearDown() throws Exception { @Test public void testCreate() { replay(storageRpcMock); - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); assertTrue(reader.isOpen()); } @Test public void testReadBuffered() throws IOException { - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); byte[] result = randomByteArray(DEFAULT_CHUNK_SIZE); ByteBuffer firstReadBuffer = ByteBuffer.allocate(42); ByteBuffer secondReadBuffer = ByteBuffer.allocate(42); @@ -102,7 +103,7 @@ public void testReadBuffered() throws IOException { @Test public void testReadBig() throws IOException { - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); reader.chunkSize(CUSTOM_CHUNK_SIZE); byte[] firstResult = randomByteArray(DEFAULT_CHUNK_SIZE); byte[] secondResult = randomByteArray(DEFAULT_CHUNK_SIZE); @@ -123,7 +124,7 @@ public void testReadBig() throws IOException { @Test public void testReadFinish() throws IOException { - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); byte[] result = {}; ByteBuffer readBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); expect(storageRpcMock.read(BLOB_ID.toPb(), EMPTY_RPC_OPTIONS, 0, DEFAULT_CHUNK_SIZE)) @@ -134,7 +135,7 @@ public void testReadFinish() throws IOException { @Test public void testSeek() throws IOException { - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); reader.seek(42); byte[] result = randomByteArray(DEFAULT_CHUNK_SIZE); ByteBuffer readBuffer = 
ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); @@ -148,7 +149,7 @@ public void testSeek() throws IOException { @Test public void testClose() { replay(storageRpcMock); - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); assertTrue(reader.isOpen()); reader.close(); assertTrue(!reader.isOpen()); @@ -157,7 +158,7 @@ public void testClose() { @Test public void testReadClosed() { replay(storageRpcMock); - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); reader.close(); try { ByteBuffer readBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); @@ -171,7 +172,7 @@ public void testReadClosed() { @Test public void testReadGenerationChanged() throws IOException { BlobId blobId = BlobId.of(BUCKET_NAME, BLOB_NAME); - reader = new BlobReadChannelImpl(options, blobId, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, blobId, EMPTY_RPC_OPTIONS); byte[] firstResult = randomByteArray(DEFAULT_CHUNK_SIZE); byte[] secondResult = randomByteArray(DEFAULT_CHUNK_SIZE); ByteBuffer firstReadBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); @@ -185,7 +186,7 @@ public void testReadGenerationChanged() throws IOException { reader.read(firstReadBuffer); try { reader.read(secondReadBuffer); - fail("Expected BlobReadChannel read to throw StorageException"); + fail("Expected ReadChannel read to throw StorageException"); } catch (StorageException ex) { StringBuilder messageBuilder = new StringBuilder(); messageBuilder.append("Blob ").append(blobId).append(" was updated while reading"); @@ -204,10 +205,10 @@ public void testSaveAndRestore() throws IOException { expect(storageRpcMock.read(BLOB_ID.toPb(), EMPTY_RPC_OPTIONS, 42, DEFAULT_CHUNK_SIZE)) .andReturn(StorageRpc.Tuple.of("etag", secondResult)); replay(storageRpcMock); - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); reader.read(firstReadBuffer); - RestorableState readerState = reader.capture(); - BlobReadChannel restoredReader = readerState.restore(); + RestorableState readerState = reader.capture(); + ReadChannel restoredReader = readerState.restore(); restoredReader.read(secondReadBuffer); assertArrayEquals(Arrays.copyOf(firstResult, firstReadBuffer.capacity()), firstReadBuffer.array()); @@ -217,11 +218,11 @@ public void testSaveAndRestore() throws IOException { @Test public void testStateEquals() { replay(storageRpcMock); - reader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); + reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); @SuppressWarnings("resource") // avoid closing when you don't want partial writes to GCS - BlobReadChannel secondReader = new BlobReadChannelImpl(options, BLOB_ID, EMPTY_RPC_OPTIONS); - RestorableState state = reader.capture(); - RestorableState secondState = secondReader.capture(); + ReadChannel secondReader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); + RestorableState state = reader.capture(); + RestorableState secondState = secondReader.capture(); assertEquals(state, secondState); assertEquals(state.hashCode(), secondState.hashCode()); assertEquals(state.toString(), secondState.toString()); diff --git a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobTest.java index bc6a4725d7e7..586e7fd0fd39 100644 --- 
a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobTest.java @@ -31,6 +31,7 @@ import static org.junit.Assert.assertTrue; import com.google.api.client.util.Lists; +import com.google.gcloud.ReadChannel; import com.google.gcloud.storage.Storage.CopyRequest; import org.easymock.Capture; @@ -188,7 +189,7 @@ public void testCopyToBlobId() throws Exception { @Test public void testReader() throws Exception { - BlobReadChannel channel = createMock(BlobReadChannel.class); + ReadChannel channel = createMock(ReadChannel.class); expect(storage.reader(BLOB_INFO.blobId())).andReturn(channel); replay(storage); assertSame(channel, blob.reader()); diff --git a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelImplTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelTest.java similarity index 85% rename from gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelImplTest.java rename to gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelTest.java index 518ba8e14c65..e499f6b9de52 100644 --- a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelImplTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/BlobWriteChannelTest.java @@ -33,6 +33,7 @@ import com.google.common.collect.ImmutableMap; import com.google.gcloud.RestorableState; import com.google.gcloud.RetryParams; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.spi.StorageRpcFactory; @@ -48,7 +49,7 @@ import java.util.Map; import java.util.Random; -public class BlobWriteChannelImplTest { +public class BlobWriteChannelTest { private static final String BUCKET_NAME = "b"; private static final String BLOB_NAME = "n"; @@ -63,7 +64,7 @@ public class BlobWriteChannelImplTest { private StorageOptions options; private StorageRpcFactory rpcFactoryMock; private StorageRpc storageRpcMock; - private BlobWriteChannelImpl writer; + private BlobWriteChannel writer; @Before public void setUp() { @@ -88,7 +89,7 @@ public void tearDown() throws Exception { public void testCreate() { expect(storageRpcMock.open(BLOB_INFO.toPb(), EMPTY_RPC_OPTIONS)).andReturn(UPLOAD_ID); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); assertTrue(writer.isOpen()); } @@ -96,7 +97,7 @@ public void testCreate() { public void testWriteWithoutFlush() throws IOException { expect(storageRpcMock.open(BLOB_INFO.toPb(), EMPTY_RPC_OPTIONS)).andReturn(UPLOAD_ID); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); assertEquals(MIN_CHUNK_SIZE, writer.write(ByteBuffer.allocate(MIN_CHUNK_SIZE))); } @@ -107,7 +108,7 @@ public void testWriteWithFlush() throws IOException { storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(CUSTOM_CHUNK_SIZE), eq(false)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); writer.chunkSize(CUSTOM_CHUNK_SIZE); ByteBuffer buffer = randomBuffer(CUSTOM_CHUNK_SIZE); assertEquals(CUSTOM_CHUNK_SIZE, writer.write(buffer)); @@ -121,7 +122,7 @@ public void testWritesAndFlush() throws IOException { 
storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(DEFAULT_CHUNK_SIZE), eq(false)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); ByteBuffer[] buffers = new ByteBuffer[DEFAULT_CHUNK_SIZE / MIN_CHUNK_SIZE]; for (int i = 0; i < buffers.length; i++) { buffers[i] = randomBuffer(MIN_CHUNK_SIZE); @@ -141,7 +142,7 @@ public void testCloseWithoutFlush() throws IOException { Capture capturedBuffer = Capture.newInstance(); storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); assertTrue(writer.isOpen()); writer.close(); assertArrayEquals(new byte[0], capturedBuffer.getValue()); @@ -156,7 +157,7 @@ public void testCloseWithFlush() throws IOException { storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(MIN_CHUNK_SIZE), eq(true)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); assertTrue(writer.isOpen()); writer.write(buffer); writer.close(); @@ -171,7 +172,7 @@ public void testWriteClosed() throws IOException { Capture capturedBuffer = Capture.newInstance(); storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); writer.close(); try { writer.write(ByteBuffer.allocate(MIN_CHUNK_SIZE)); @@ -192,12 +193,12 @@ public void testSaveAndRestore() throws IOException { replay(storageRpcMock); ByteBuffer buffer1 = randomBuffer(DEFAULT_CHUNK_SIZE); ByteBuffer buffer2 = randomBuffer(DEFAULT_CHUNK_SIZE); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); assertEquals(DEFAULT_CHUNK_SIZE, writer.write(buffer1)); assertArrayEquals(buffer1.array(), capturedBuffer.getValues().get(0)); assertEquals(new Long(0L), capturedPosition.getValues().get(0)); - RestorableState writerState = writer.capture(); - BlobWriteChannel restoredWriter = writerState.restore(); + RestorableState writerState = writer.capture(); + WriteChannel restoredWriter = writerState.restore(); assertEquals(DEFAULT_CHUNK_SIZE, restoredWriter.write(buffer2)); assertArrayEquals(buffer2.array(), capturedBuffer.getValues().get(1)); assertEquals(new Long(DEFAULT_CHUNK_SIZE), capturedPosition.getValues().get(1)); @@ -209,32 +210,31 @@ public void testSaveAndRestoreClosed() throws IOException { Capture capturedBuffer = Capture.newInstance(); storageRpcMock.write(eq(UPLOAD_ID), capture(capturedBuffer), eq(0), eq(0L), eq(0), eq(true)); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); writer.close(); - RestorableState writerState = writer.capture(); - RestorableState expectedWriterState = - BlobWriteChannelImpl.StateImpl.builder(options, BLOB_INFO, UPLOAD_ID) + RestorableState writerState = writer.capture(); + RestorableState expectedWriterState = + BlobWriteChannel.StateImpl.builder(options, BLOB_INFO, UPLOAD_ID) .buffer(null) 
.chunkSize(DEFAULT_CHUNK_SIZE) .isOpen(false) .position(0) .build(); - BlobWriteChannel restoredWriter = writerState.restore(); + WriteChannel restoredWriter = writerState.restore(); assertArrayEquals(new byte[0], capturedBuffer.getValue()); assertEquals(expectedWriterState, restoredWriter.capture()); } @Test public void testStateEquals() { - expect(storageRpcMock.open(BLOB_INFO.toPb(), EMPTY_RPC_OPTIONS)).andReturn(UPLOAD_ID) - .times(2); + expect(storageRpcMock.open(BLOB_INFO.toPb(), EMPTY_RPC_OPTIONS)).andReturn(UPLOAD_ID).times(2); replay(storageRpcMock); - writer = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + writer = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); // avoid closing when you don't want partial writes to GCS upon failure @SuppressWarnings("resource") - BlobWriteChannel writer2 = new BlobWriteChannelImpl(options, BLOB_INFO, EMPTY_RPC_OPTIONS); - RestorableState state = writer.capture(); - RestorableState state2 = writer2.capture(); + WriteChannel writer2 = new BlobWriteChannel(options, BLOB_INFO, EMPTY_RPC_OPTIONS); + RestorableState state = writer.capture(); + RestorableState state2 = writer2.capture(); assertEquals(state, state2); assertEquals(state.hashCode(), state2.hashCode()); assertEquals(state.toString(), state2.toString()); diff --git a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/ITStorageTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/ITStorageTest.java index 30ce858dc20e..614ceee7b61e 100644 --- a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/ITStorageTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/ITStorageTest.java @@ -29,7 +29,9 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.gcloud.Page; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RestorableState; +import com.google.gcloud.WriteChannel; import com.google.gcloud.storage.Storage.BlobField; import com.google.gcloud.storage.Storage.BucketField; import com.google.gcloud.storage.testing.RemoteGcsHelper; @@ -702,14 +704,14 @@ public void testReadAndWriteChannels() throws IOException { String blobName = "test-read-and-write-channels-blob"; BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build(); byte[] stringBytes; - try (BlobWriteChannel writer = storage.writer(blob)) { + try (WriteChannel writer = storage.writer(blob)) { stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); writer.write(ByteBuffer.wrap(stringBytes)); } ByteBuffer readBytes; ByteBuffer readStringBytes; - try (BlobReadChannel reader = storage.reader(blob.blobId())) { + try (ReadChannel reader = storage.reader(blob.blobId())) { readBytes = ByteBuffer.allocate(BLOB_BYTE_CONTENT.length); readStringBytes = ByteBuffer.allocate(stringBytes.length); reader.read(readBytes); @@ -725,21 +727,21 @@ public void testReadAndWriteCaptureChannels() throws IOException { String blobName = "test-read-and-write-capture-channels-blob"; BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build(); byte[] stringBytes; - BlobWriteChannel writer = storage.writer(blob); + WriteChannel writer = storage.writer(blob); stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); writer.write(ByteBuffer.wrap(BLOB_BYTE_CONTENT)); - RestorableState writerState = writer.capture(); - BlobWriteChannel secondWriter = writerState.restore(); + RestorableState writerState = writer.capture(); + WriteChannel secondWriter = writerState.restore(); 
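
The capture/restore round trip in this test is the pattern the rework keeps intact: checkpoint a live upload, then finish it from the restored channel. A self-contained sketch, assuming placeholder bucket/blob names and the hypothetical wrapper class CheckpointedUpload:

import com.google.gcloud.RestorableState;
import com.google.gcloud.WriteChannel;
import com.google.gcloud.storage.BlobInfo;
import com.google.gcloud.storage.Storage;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class CheckpointedUpload {
  // Start an upload, checkpoint it, and finish from the restored channel; only
  // one of the two channels should be used after capture(), mirroring the test.
  static void upload(Storage storage) throws IOException {
    BlobInfo blob = BlobInfo.builder("my-bucket", "my-blob").build();
    WriteChannel writer = storage.writer(blob);
    writer.write(ByteBuffer.wrap("first half ".getBytes(StandardCharsets.UTF_8)));
    RestorableState<WriteChannel> state = writer.capture(); // flushes, then snapshots
    WriteChannel restored = state.restore();
    restored.write(ByteBuffer.wrap("second half".getBytes(StandardCharsets.UTF_8)));
    restored.close(); // content becomes readable only after close()
  }
}
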
secondWriter.write(ByteBuffer.wrap(stringBytes)); secondWriter.close(); ByteBuffer readBytes; ByteBuffer readStringBytes; - BlobReadChannel reader = storage.reader(blob.blobId()); + ReadChannel reader = storage.reader(blob.blobId()); reader.chunkSize(BLOB_BYTE_CONTENT.length); readBytes = ByteBuffer.allocate(BLOB_BYTE_CONTENT.length); reader.read(readBytes); - RestorableState readerState = reader.capture(); - BlobReadChannel secondReader = readerState.restore(); + RestorableState readerState = reader.capture(); + ReadChannel secondReader = readerState.restore(); readStringBytes = ByteBuffer.allocate(stringBytes.length); secondReader.read(readStringBytes); reader.close(); @@ -754,14 +756,14 @@ public void testReadChannelFail() throws IOException { String blobName = "test-read-channel-blob-fail"; BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build(); assertNotNull(storage.create(blob)); - try (BlobReadChannel reader = + try (ReadChannel reader = storage.reader(blob.blobId(), Storage.BlobSourceOption.metagenerationMatch(-1L))) { reader.read(ByteBuffer.allocate(42)); fail("StorageException was expected"); } catch (StorageException ex) { // expected } - try (BlobReadChannel reader = + try (ReadChannel reader = storage.reader(blob.blobId(), Storage.BlobSourceOption.generationMatch(-1L))) { reader.read(ByteBuffer.allocate(42)); fail("StorageException was expected"); @@ -769,7 +771,7 @@ public void testReadChannelFail() throws IOException { // expected } BlobId blobIdWrongGeneration = BlobId.of(BUCKET, blobName, -1L); - try (BlobReadChannel reader = + try (ReadChannel reader = storage.reader(blobIdWrongGeneration, Storage.BlobSourceOption.generationMatch())) { reader.read(ByteBuffer.allocate(42)); fail("StorageException was expected"); @@ -791,13 +793,13 @@ public void testReadChannelFailUpdatedGeneration() throws IOException { BlobInfo remoteBlob = storage.create(blob, content); assertNotNull(remoteBlob); assertEquals(blobSize, (long) remoteBlob.size()); - try (BlobReadChannel reader = storage.reader(blob.blobId())) { + try (ReadChannel reader = storage.reader(blob.blobId())) { reader.chunkSize(chunkSize); ByteBuffer readBytes = ByteBuffer.allocate(chunkSize); int numReadBytes = reader.read(readBytes); assertEquals(chunkSize, numReadBytes); assertArrayEquals(Arrays.copyOf(content, chunkSize), readBytes.array()); - try (BlobWriteChannel writer = storage.writer(blob)) { + try (WriteChannel writer = storage.writer(blob)) { byte[] newContent = new byte[blobSize]; random.nextBytes(newContent); int numWrittenBytes = writer.write(ByteBuffer.wrap(newContent)); @@ -819,8 +821,7 @@ public void testWriteChannelFail() throws IOException { String blobName = "test-write-channel-blob-fail"; BlobInfo blob = BlobInfo.builder(BUCKET, blobName, -1L).build(); try { - try (BlobWriteChannel writer = - storage.writer(blob, Storage.BlobWriteOption.generationMatch())) { + try (WriteChannel writer = storage.writer(blob, Storage.BlobWriteOption.generationMatch())) { writer.write(ByteBuffer.allocate(42)); } fail("StorageException was expected"); @@ -835,7 +836,7 @@ public void testWriteChannelExistingBlob() throws IOException { BlobInfo blob = BlobInfo.builder(BUCKET, blobName).build(); BlobInfo remoteBlob = storage.create(blob); byte[] stringBytes; - try (BlobWriteChannel writer = storage.writer(remoteBlob)) { + try (WriteChannel writer = storage.writer(remoteBlob)) { stringBytes = BLOB_STRING_CONTENT.getBytes(UTF_8); writer.write(ByteBuffer.wrap(stringBytes)); } diff --git 
a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/SerializationTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/SerializationTest.java index 555e231f7f0e..8506e8b48f6b 100644 --- a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/SerializationTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/SerializationTest.java @@ -22,8 +22,10 @@ import com.google.common.collect.ImmutableMap; import com.google.gcloud.AuthCredentials; import com.google.gcloud.PageImpl; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RestorableState; import com.google.gcloud.RetryParams; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.storage.Acl.Project.ProjectRole; @@ -112,10 +114,10 @@ public void testReadChannelState() throws IOException, ClassNotFoundException { .projectId("p2") .retryParams(RetryParams.defaultInstance()) .build(); - BlobReadChannel reader = - new BlobReadChannelImpl(options, BlobId.of("b", "n"), EMPTY_RPC_OPTIONS); - RestorableState state = reader.capture(); - RestorableState deserializedState = serializeAndDeserialize(state); + ReadChannel reader = + new BlobReadChannel(options, BlobId.of("b", "n"), EMPTY_RPC_OPTIONS); + RestorableState state = reader.capture(); + RestorableState deserializedState = serializeAndDeserialize(state); assertEquals(state, deserializedState); assertEquals(state.hashCode(), deserializedState.hashCode()); assertEquals(state.toString(), deserializedState.toString()); @@ -130,10 +132,10 @@ public void testWriteChannelState() throws IOException, ClassNotFoundException { .build(); // avoid closing when you don't want partial writes to GCS upon failure @SuppressWarnings("resource") - BlobWriteChannelImpl writer = new BlobWriteChannelImpl( - options, BlobInfo.builder(BlobId.of("b", "n")).build(), "upload-id"); - RestorableState state = writer.capture(); - RestorableState deserializedState = serializeAndDeserialize(state); + BlobWriteChannel writer = + new BlobWriteChannel(options, BlobInfo.builder(BlobId.of("b", "n")).build(), "upload-id"); + RestorableState state = writer.capture(); + RestorableState deserializedState = serializeAndDeserialize(state); assertEquals(state, deserializedState); assertEquals(state.hashCode(), deserializedState.hashCode()); assertEquals(state.toString(), deserializedState.toString()); diff --git a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/StorageImplTest.java b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/StorageImplTest.java index 3aaec047714f..0e1f1a0b2f52 100644 --- a/gcloud-java-storage/src/test/java/com/google/gcloud/storage/StorageImplTest.java +++ b/gcloud-java-storage/src/test/java/com/google/gcloud/storage/StorageImplTest.java @@ -33,8 +33,10 @@ import com.google.common.io.BaseEncoding; import com.google.gcloud.AuthCredentials.ServiceAccountAuthCredentials; import com.google.gcloud.Page; +import com.google.gcloud.ReadChannel; import com.google.gcloud.RetryParams; import com.google.gcloud.ServiceOptions; +import com.google.gcloud.WriteChannel; import com.google.gcloud.spi.StorageRpc; import com.google.gcloud.spi.StorageRpc.Tuple; import com.google.gcloud.spi.StorageRpcFactory; @@ -1011,7 +1013,7 @@ public Tuple apply(StorageObject f) { public void testReader() { EasyMock.replay(storageRpcMock); storage = options.service(); - BlobReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME1); + ReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME1); 
assertNotNull(channel); assertTrue(channel.isOpen()); } @@ -1024,7 +1026,7 @@ public void testReaderWithOptions() throws IOException { .andReturn(StorageRpc.Tuple.of("etag", result)); EasyMock.replay(storageRpcMock); storage = options.service(); - BlobReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME2, BLOB_SOURCE_GENERATION, + ReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME2, BLOB_SOURCE_GENERATION, BLOB_SOURCE_METAGENERATION); assertNotNull(channel); assertTrue(channel.isOpen()); @@ -1039,7 +1041,7 @@ public void testReaderWithOptionsFromBlobId() throws IOException { .andReturn(StorageRpc.Tuple.of("etag", result)); EasyMock.replay(storageRpcMock); storage = options.service(); - BlobReadChannel channel = storage.reader(BLOB_INFO1.blobId(), + ReadChannel channel = storage.reader(BLOB_INFO1.blobId(), BLOB_SOURCE_GENERATION_FROM_BLOB_ID, BLOB_SOURCE_METAGENERATION); assertNotNull(channel); assertTrue(channel.isOpen()); @@ -1055,7 +1057,7 @@ public void testWriter() { .andReturn("upload-id"); EasyMock.replay(storageRpcMock); storage = options.service(); - BlobWriteChannel channel = storage.writer(infoWithHashes); + WriteChannel channel = storage.writer(infoWithHashes); assertNotNull(channel); assertTrue(channel.isOpen()); } @@ -1067,7 +1069,7 @@ public void testWriterWithOptions() { .andReturn("upload-id"); EasyMock.replay(storageRpcMock); storage = options.service(); - BlobWriteChannel channel = storage.writer(info, BLOB_WRITE_METAGENERATION, BLOB_WRITE_NOT_EXIST, + WriteChannel channel = storage.writer(info, BLOB_WRITE_METAGENERATION, BLOB_WRITE_NOT_EXIST, BLOB_WRITE_PREDEFINED_ACL, BLOB_WRITE_CRC2C, BLOB_WRITE_MD5_HASH); assertNotNull(channel); assertTrue(channel.isOpen());
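
The channel states above rely on plain Java serialization, which is what serializeAndDeserialize in SerializationTest exercises; a sketch of the equivalent round trip (the StateRoundTrip class name is illustrative):

import com.google.gcloud.ReadChannel;
import com.google.gcloud.RestorableState;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;

final class StateRoundTrip {
  // Serialize a captured channel state and read it back, as the tests do.
  static RestorableState<ReadChannel> roundTrip(RestorableState<ReadChannel> state)
      throws IOException, ClassNotFoundException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
      out.writeObject(state); // StateImpl implements Serializable
    }
    try (ObjectInputStream in =
        new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
      @SuppressWarnings("unchecked")
      RestorableState<ReadChannel> copy = (RestorableState<ReadChannel>) in.readObject();
      return copy;
    }
  }
}
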