Commit f622b02
fix maven properties for embedded maven plugin
jetoile committed Jun 19, 2018
1 parent f60347f commit f622b02
Showing 5 changed files with 64 additions and 21 deletions.
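
The same fix repeats across the five files: each bootstrap's loadConfig(Map<String, String> configs), the entry point used by the embedded Maven plugin, read values back out of the file-backed configuration object instead of the configs map it was handed, so plugin-supplied overrides were silently ignored. The commit also caches derived URIs (hdfsUri, namenodeUri, webHdfsUri, ...) in fields so that both configuration paths feed the same state. A minimal sketch of the corrected pattern, with illustrative class and key names rather than real hadoop-unit code:

import java.util.Map;

import org.apache.commons.lang.StringUtils;

// Illustrative sketch only: overrides must be read from the Map the plugin passes.
public class ExampleBootstrap {
    private int restPort = 28000;   // default previously loaded from the properties file
    private String hdfsUri;         // derived value, cached as a field by both config paths

    public void loadConfig(Map<String, String> configs) {
        if (StringUtils.isNotEmpty(configs.get("hbase.rest.port"))) {
            // before the fix: restPort = configuration.getInt(...), which
            // re-read the properties file and dropped the override
            restPort = Integer.parseInt(configs.get("hbase.rest.port"));
        }
        if (StringUtils.isNotEmpty(configs.get("hdfs.namenode.host")) && StringUtils.isNotEmpty(configs.get("hdfs.namenode.port"))) {
            hdfsUri = "hdfs://" + configs.get("hdfs.namenode.host") + ":" + configs.get("hdfs.namenode.port");
        }
    }
}
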
HadoopUnitConfig.java
@@ -91,6 +91,7 @@ public class HadoopUnitConfig {
public static final String HDFS_ENABLE_PERMISSIONS_KEY = "hdfs.enable.permissions";
public static final String HDFS_FORMAT_KEY = "hdfs.format";
public static final String HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER = "hdfs.enable.running.user.as.proxy.user";
public static final String HDFS_REPLICATION_KEY = "hdfs.replication";

// HDFS Test
public static final String HDFS_TEST_FILE_KEY = "hdfs.test.file";
HBaseBootstrap.java
@@ -55,6 +55,8 @@ public class HBaseBootstrap implements BootstrapHadoop {
private int restMaxThread;
private int restMinThread;

private String hdfsUri;

public HBaseBootstrap() {
if (hbaseLocalCluster == null) {
try {
@@ -95,7 +97,8 @@ private void init() {
private void build() {
org.apache.hadoop.conf.Configuration hbaseConfiguration = new org.apache.hadoop.conf.Configuration();
hbaseConfiguration.setBoolean("hbase.table.sanity.checks", false);
hbaseConfiguration.set("fs.default.name", "hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY));
hbaseConfiguration.set("fs.default.name", hdfsUri);
hbaseConfiguration.set("hbase.regionserver.hlog.tolerable.lowreplication", "1");

hbaseLocalCluster = new HbaseLocalCluster.Builder()
.setHbaseMasterPort(port)
@@ -134,6 +137,7 @@ private void loadConfig() throws BootstrapException {
restReadOnly = configuration.getBoolean(HadoopUnitConfig.HBASE_REST_READONLY_KEY);
restMaxThread = configuration.getInt(HadoopUnitConfig.HBASE_REST_THREADMAX_KEY);
restMinThread = configuration.getInt(HadoopUnitConfig.HBASE_REST_THREADMIN_KEY);
hdfsUri = "hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY);
}

@Override
@@ -164,22 +168,25 @@ public void loadConfig(Map<String, String> configs) {
}

if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_HOST_KEY))) {
restHost = configuration.getString(HadoopUnitConfig.HBASE_REST_HOST_KEY);
restHost = configs.get(HadoopUnitConfig.HBASE_REST_HOST_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_PORT_KEY))) {
restPort = configuration.getInt(HadoopUnitConfig.HBASE_REST_PORT_KEY);
restPort = Integer.parseInt(configs.get(HadoopUnitConfig.HBASE_REST_PORT_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_INFO_PORT_KEY))) {
restInfoPort = configuration.getInt(HadoopUnitConfig.HBASE_REST_INFO_PORT_KEY);
restInfoPort = Integer.parseInt(configs.get(HadoopUnitConfig.HBASE_REST_INFO_PORT_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_READONLY_KEY))) {
restReadOnly = configuration.getBoolean(HadoopUnitConfig.HBASE_REST_READONLY_KEY);
restReadOnly = Boolean.parseBoolean(configs.get(HadoopUnitConfig.HBASE_REST_READONLY_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_THREADMAX_KEY))) {
restMaxThread = configuration.getInt(HadoopUnitConfig.HBASE_REST_THREADMAX_KEY);
restMaxThread = Integer.parseInt(configs.get(HadoopUnitConfig.HBASE_REST_THREADMAX_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_THREADMIN_KEY))) {
restMinThread = configuration.getInt(HadoopUnitConfig.HBASE_REST_THREADMIN_KEY);
restMinThread = Integer.parseInt(configs.get(HadoopUnitConfig.HBASE_REST_THREADMIN_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY))) {
hdfsUri = "hdfs://" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + Integer.parseInt(configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY));
}
}

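HBaseBootstrap now derives hdfsUri once per configuration pass, and build() simply injects it into fs.default.name. A hedged usage sketch of the override path, assuming HadoopUnitConfig and HBaseBootstrap are on the classpath; the host and port values are made up:

import java.util.HashMap;
import java.util.Map;

public class HBaseOverrideExample {
    public static void main(String[] args) {
        // Pass the same kind of overrides the embedded Maven plugin would.
        Map<String, String> overrides = new HashMap<>();
        overrides.put(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY, "localhost");
        overrides.put(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY, "20112");

        HBaseBootstrap hbase = new HBaseBootstrap();
        hbase.loadConfig(overrides);  // caches hdfsUri = "hdfs://localhost:20112"
        // a later build()/start() now points HBase at the overridden namenode
    }
}
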
HdfsBootstrap.java
@@ -49,6 +49,7 @@ public class HdfsBootstrap implements BootstrapHadoop {
private boolean format;
private int httpPort;
private String host;
private Integer replication = 1;

public HdfsBootstrap() {
if (hdfsLocalCluster == null) {
@@ -89,6 +90,9 @@ private void init() {
}

private void build() {
HdfsConfiguration hdfsConfiguration = new HdfsConfiguration();
hdfsConfiguration.set("dfs.replication", replication.toString());

hdfsLocalCluster = new HdfsLocalCluster.Builder()
.setHdfsNamenodePort(port)
.setHdfsNamenodeHttpPort(httpPort)
@@ -97,7 +101,7 @@ private void build() {
.setHdfsFormat(format)
.setHdfsNumDatanodes(numDatanodes)
.setHdfsTempDir(tempDirectory)
.setHdfsConfig(new HdfsConfiguration())
.setHdfsConfig(hdfsConfiguration)
.build();
}

@@ -112,6 +116,7 @@ private void loadConfig() throws BootstrapException {
enablePermission = configuration.getBoolean(HadoopUnitConfig.HDFS_ENABLE_PERMISSIONS_KEY);
format = configuration.getBoolean(HadoopUnitConfig.HDFS_FORMAT_KEY);
enableRunningUserAsProxy = configuration.getBoolean(HadoopUnitConfig.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER);
replication = configuration.getInt(HadoopUnitConfig.HDFS_REPLICATION_KEY, 1);
}

@Override
@@ -140,6 +145,9 @@ public void loadConfig(Map<String, String> configs) {
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER))) {
enableRunningUserAsProxy = Boolean.parseBoolean(configs.get(HadoopUnitConfig.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_REPLICATION_KEY))) {
replication = Integer.parseInt(configs.get(HadoopUnitConfig.HDFS_REPLICATION_KEY));
}
}


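The new hdfs.replication property falls back to 1 when absent, which is the right default for a mostly single-datanode embedded cluster; a higher value with too few datanodes would leave blocks under-replicated. A sketch of the wiring in isolation; the properties file name here is an assumption, not taken from the repository:

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ReplicationWiringExample {
    public static void main(String[] args) throws Exception {
        // Assumed file name for this sketch.
        Configuration configuration = new PropertiesConfiguration("hadoop-unit-default.properties");

        // Mirrors loadConfig(): a missing key falls back to 1 replica.
        int replication = configuration.getInt(HadoopUnitConfig.HDFS_REPLICATION_KEY, 1);

        // Mirrors build(): hand the value to the mini cluster via dfs.replication.
        HdfsConfiguration hdfsConfiguration = new HdfsConfiguration();
        hdfsConfiguration.set("dfs.replication", Integer.toString(replication));
    }
}
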
HiveServer2Bootstrap.java
@@ -52,6 +52,7 @@ public class HiveServer2Bootstrap implements BootstrapHadoop {
private String hostMetastore;
private int portMetastore;

private String hdfsUri;

public HiveServer2Bootstrap() {
if (hiveLocalServer2 == null) {
@@ -94,6 +95,7 @@ private void loadConfig() throws BootstrapException {
scratchDirectory = configuration.getString(HadoopUnitConfig.HIVE_SCRATCH_DIR_KEY);
warehouseDirectory = configuration.getString(HadoopUnitConfig.HIVE_WAREHOUSE_DIR_KEY);
zookeeperConnectionString = configuration.getString(HadoopUnitConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(HadoopUnitConfig.ZOOKEEPER_PORT_KEY);
hdfsUri = "hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY);
}

@Override
@@ -122,6 +124,9 @@ public void loadConfig(Map<String, String> configs) {
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.ZOOKEEPER_PORT_KEY))) {
zookeeperConnectionString = configs.get(HadoopUnitConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(HadoopUnitConfig.ZOOKEEPER_PORT_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY))) {
hdfsUri = "hdfs://" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + Integer.parseInt(configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY));
}
}


@@ -157,7 +162,7 @@ private HiveConf buildHiveConf() {
WindowsLibsUtils.setHadoopHome();

HiveConf hiveConf = new HiveConf();
hiveConf.set("fs.defaultFS", "hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getInt(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY));
hiveConf.set("fs.defaultFS", hdfsUri);
// hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
// hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
// hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
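The Hive bootstrap gets the same treatment: fs.defaultFS is filled from the cached hdfsUri, so buildHiveConf() no longer re-reads the namenode host and port from the properties file. A minimal sketch of that injection; the URI value is illustrative:

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveDefaultFsExample {
    public static void main(String[] args) {
        // In HiveServer2Bootstrap this field is set by loadConfig()/loadConfig(Map).
        String hdfsUri = "hdfs://localhost:20112";

        HiveConf hiveConf = new HiveConf();
        hiveConf.set("fs.defaultFS", hdfsUri);  // as buildHiveConf() now does
    }
}
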
KnoxBootstrap.java
@@ -21,15 +21,15 @@
import fr.jetoile.hadoopunit.HadoopUtils;
import fr.jetoile.hadoopunit.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URL;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class KnoxBootstrap implements Bootstrap {
@@ -50,6 +50,12 @@ public class KnoxBootstrap implements Bootstrap {
private String tempDirectory;
private List<KnoxService> services = new ArrayList<>();

private String namenodeUri;
private String webHdfsUri;
private String webHBaseUri;
private String oozieUri;


public KnoxBootstrap() {
if (knoxLocalCluster == null) {
try {
@@ -121,28 +127,28 @@ String getTopology(List<KnoxService> services) {
xmlTag
.addTag("service")
.addTag("role").addText(service.name())
.addTag("url").addText("hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY))
.addTag("url").addText(namenodeUri)
.gotoParent();
break;
case WEBHDFS:
xmlTag
.addTag("service")
.addTag("role").addText(service.name())
.addTag("url").addText("http://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs")
.addTag("url").addText(webHdfsUri)
.gotoParent();
break;
case WEBHBASE:
xmlTag
.addTag("service")
.addTag("role").addText(service.name())
.addTag("url").addText("http://" + configuration.getString(HadoopUnitConfig.HBASE_REST_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HBASE_REST_PORT_KEY))
.addTag("url").addText(webHBaseUri)
.gotoParent();
break;
case OOZIE:
xmlTag
.addTag("service")
.addTag("role").addText(service.name())
.addTag("url").addText("http://" + configuration.getString(HadoopUnitConfig.OOZIE_HOST) + ":" + configuration.getString(HadoopUnitConfig.OOZIE_PORT) + "/oozie")
.addTag("url").addText(oozieUri)
.gotoParent();
break;
}
@@ -160,6 +166,12 @@ private void loadConfig() throws BootstrapException {

List<String> servicesList = Arrays.asList(configuration.getStringArray(HadoopUnitConfig.KNOX_SERVICE_KEY));
services = Arrays.asList(KnoxService.values()).stream().filter(s -> servicesList.contains(s.getName())).collect(Collectors.toList());

namenodeUri = "hdfs://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY);
webHdfsUri = "http://" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY) + "/webhdfs";
webHBaseUri = "http://" + configuration.getString(HadoopUnitConfig.HBASE_REST_HOST_KEY) + ":" + configuration.getString(HadoopUnitConfig.HBASE_REST_PORT_KEY);
oozieUri = "http://" + configuration.getString(HadoopUnitConfig.OOZIE_HOST) + ":" + configuration.getString(HadoopUnitConfig.OOZIE_PORT) + "/oozie";

}

@Override
@@ -168,21 +180,31 @@ public void loadConfig(Map<String, String> configs) {
port = Integer.parseInt(configs.get(HadoopUnitConfig.KNOX_PORT_KEY));
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.KNOX_HOST_KEY))) {
host = configuration.getString(HadoopUnitConfig.KNOX_HOST_KEY);
host = configs.get(HadoopUnitConfig.KNOX_HOST_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.KNOX_PATH_KEY))) {
path = configuration.getString(HadoopUnitConfig.KNOX_PATH_KEY);
path = configs.get(HadoopUnitConfig.KNOX_PATH_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.KNOX_CLUSTER_KEY))) {
clusterName = configuration.getString(HadoopUnitConfig.KNOX_CLUSTER_KEY);
clusterName = configs.get(HadoopUnitConfig.KNOX_CLUSTER_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.KNOX_HOME_DIR_KEY))) {
tempDirectory = configuration.getString(HadoopUnitConfig.KNOX_HOME_DIR_KEY);
tempDirectory = configs.get(HadoopUnitConfig.KNOX_HOME_DIR_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.KNOX_SERVICE_KEY))) {
List<String> servicesList = Arrays.asList(configuration.getStringArray(HadoopUnitConfig.KNOX_SERVICE_KEY));
services = Arrays.asList(KnoxService.values()).stream().filter(s -> servicesList.contains(s.getName())).collect(Collectors.toList());
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY))) {
namenodeUri = "hdfs://" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_PORT_KEY);
webHdfsUri = "http://" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configs.get(HadoopUnitConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs";
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.HBASE_REST_PORT_KEY))) {
webHBaseUri = "http://" + configs.get(HadoopUnitConfig.HBASE_REST_HOST_KEY) + ":" + configs.get(HadoopUnitConfig.HBASE_REST_PORT_KEY);
}
if (StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.OOZIE_HOST)) && StringUtils.isNotEmpty(configs.get(HadoopUnitConfig.OOZIE_PORT))) {
oozieUri = "http://" + configs.get(HadoopUnitConfig.OOZIE_HOST) + ":" + configs.get(HadoopUnitConfig.OOZIE_PORT) + "/oozie";
}
}


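KnoxBootstrap now resolves all four endpoint URIs up front, and getTopology() only templates them into the topology's service entries. A sketch of driving it, under two assumptions: the caller sits in the same package (getTopology is package-private) and KnoxService exposes enum constants matching the cached fields, e.g. NAMENODE and WEBHDFS:

import java.util.Arrays;

public class KnoxTopologyExample {
    public static void main(String[] args) {
        KnoxBootstrap knox = new KnoxBootstrap();  // loads defaults, caching the URIs
        // One <service> element with <role> and <url> tags is emitted per entry.
        String topology = knox.getTopology(Arrays.asList(KnoxService.NAMENODE, KnoxService.WEBHDFS));
        System.out.println(topology);
    }
}
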
