Fixing line lengths in murmur3 and hdfs plugins #34603

Merged · 1 commit · Oct 18, 2018
@@ -81,7 +81,8 @@ protected void setupFieldType(BuilderContext context) {

     public static class TypeParser implements Mapper.TypeParser {
         @Override
-        public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+        public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
+                throws MapperParsingException {
             Builder builder = new Builder(name);

             // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash
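
The comment above touches on what murmur3 fields are for: they index only a hash of each value, not the value itself. As a rough illustration of such a hash, here is a minimal sketch using Guava's murmur3_128 rather than Elasticsearch's internal implementation, so the exact variant and seed are assumptions:

    import com.google.common.hash.Hashing;
    import java.nio.charset.StandardCharsets;

    // Sketch only: compute a 128-bit murmur3 hash of a string and keep the
    // first 64 bits, roughly the kind of value a murmur3 field stores.
    // Requires Guava on the classpath; Elasticsearch's internal variant may differ.
    public class Murmur3Demo {
        public static void main(String[] args) {
            long hash = Hashing.murmur3_128()
                .hashString("some field value", StandardCharsets.UTF_8)
                .asLong();  // first 64 bits of the 128-bit hash
            System.out.println(hash);
        }
    }
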
@@ -82,7 +82,9 @@ public HdfsRepository(RepositoryMetaData metadata, Environment environment,
         uri = URI.create(uriSetting);
         if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) {
             throw new IllegalArgumentException(String.format(Locale.ROOT,
-                "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore", uri.getScheme(), uriSetting));
+                "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore",
+                uri.getScheme(),
+                uriSetting));
         }
         if (Strings.hasLength(uri.getPath()) && uri.getPath().equals("/") == false) {
             throw new IllegalArgumentException(String.format(Locale.ROOT,
@@ -134,7 +136,10 @@ private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositoryS
             }
         });

-        logger.debug("Using file-system [{}] for URI [{}], path [{}]", fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), path);
+        logger.debug("Using file-system [{}] for URI [{}], path [{}]",
+            fileContext.getDefaultFileSystem(),
+            fileContext.getDefaultFileSystem().getUri(),
+            path);

         try {
             return new HdfsBlobStore(fileContext, path, bufferSize, isReadOnly(), haEnabled);
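
The scheme check in the first hunk of this file is easy to exercise on its own. A minimal runnable sketch, with a hypothetical validateUri helper standing in for the inline constructor logic:

    import java.net.URI;
    import java.util.Locale;

    // Standalone sketch of HdfsRepository's URI validation; `validateUri` is a
    // hypothetical helper, the real check runs inline in the constructor.
    public class HdfsUriCheckDemo {
        static URI validateUri(String uriSetting) {
            URI uri = URI.create(uriSetting);
            if ("hdfs".equalsIgnoreCase(uri.getScheme()) == false) {
                throw new IllegalArgumentException(String.format(Locale.ROOT,
                    "Invalid scheme [%s] specified in uri [%s]; only 'hdfs' uri allowed for hdfs snapshot/restore",
                    uri.getScheme(),
                    uriSetting));
            }
            return uri;
        }

        public static void main(String[] args) {
            System.out.println(validateUri("hdfs://namenode:8020"));  // accepted
            try {
                validateUri("file:///tmp/repo");                      // rejected
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }
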
@@ -76,11 +76,25 @@ public void testSimpleWorkflow() {
         assertThat(count(client, "test-idx-3"), equalTo(100L));

         logger.info("--> snapshot");
-        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
+        CreateSnapshotResponse createSnapshotResponse = client.admin()
+            .cluster()
+            .prepareCreateSnapshot("test-repo", "test-snap")
+            .setWaitForCompletion(true)
+            .setIndices("test-idx-*", "-test-idx-3")
+            .get();
         assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
-        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
+            equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

-        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+        assertThat(client.admin()
+            .cluster()
+            .prepareGetSnapshots("test-repo")
+            .setSnapshots("test-snap")
+            .get()
+            .getSnapshots()
+            .get(0)
+            .state(),
+            equalTo(SnapshotState.SUCCESS));

         logger.info("--> delete some data");
         for (int i = 0; i < 50; i++) {
@@ -101,7 +115,12 @@ public void testSimpleWorkflow() {
         client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

         logger.info("--> restore all indices from the snapshot");
-        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
+            .cluster()
+            .prepareRestoreSnapshot("test-repo", "test-snap")
+            .setWaitForCompletion(true)
+            .execute()
+            .actionGet();
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

         ensureGreen();
@@ -113,7 +132,13 @@ public void testSimpleWorkflow() {
         logger.info("--> delete indices");
         client().admin().indices().prepareDelete("test-idx-1", "test-idx-2").get();
         logger.info("--> restore one index after deletion");
-        restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
+        restoreSnapshotResponse = client.admin()
+            .cluster()
+            .prepareRestoreSnapshot("test-repo", "test-snap")
+            .setWaitForCompletion(true)
+            .setIndices("test-idx-*", "-test-idx-2")
+            .execute()
+            .actionGet();
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
         ensureGreen();
         assertThat(count(client, "test-idx-1"), equalTo(100L));
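
All of the test changes above follow one convention: each call in a fluent builder chain moves to its own line, which keeps every line under the length limit and turns later edits into one-line diffs. A self-contained sketch of that style, where the Query class is hypothetical and exists only to make the example compile and run:

    // Illustration of the one-call-per-line wrapping style used throughout this PR.
    public class FluentWrapDemo {
        static class Query {
            private final StringBuilder parts = new StringBuilder();
            Query add(String part) { parts.append(part).append(' '); return this; }
            String get() { return parts.toString().trim(); }
        }

        public static void main(String[] args) {
            // Before (one long line): new Query().add("a").add("b").add("c").get();
            // After: one chained call per line, matching the diffs above.
            String result = new Query()
                .add("a")
                .add("b")
                .add("c")
                .get();
            System.out.println(result);  // prints "a b c"
        }
    }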