diff --git a/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrap.java b/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrap.java index 92de6474..c717e8bc 100644 --- a/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrap.java +++ b/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrap.java @@ -88,7 +88,7 @@ private void loadConfig() { tmpDirPath = getTmpDirPath(configuration, BookkeeperConfig.BOOKKEEPER_TEMP_DIR_KEY); zookeeperPort = configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); - zookeeperHost = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY); + zookeeperHost = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY); } @Override @@ -96,12 +96,22 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(BookkeeperConfig.BOOKKEEPER_PORT_KEY))) { port = Integer.parseInt(configs.get(BookkeeperConfig.BOOKKEEPER_PORT_KEY)); } + if (StringUtils.isNotEmpty(configs.get(BookkeeperConfig.BOOKKEEPER_HTTP_PORT_KEY))) { + httpPort = Integer.parseInt(configs.get(BookkeeperConfig.BOOKKEEPER_HTTP_PORT_KEY)); + } if (StringUtils.isNotEmpty(configs.get(BookkeeperConfig.BOOKKEEPER_IP_KEY))) { ip = configs.get(BookkeeperConfig.BOOKKEEPER_IP_KEY); } if (StringUtils.isNotEmpty(configs.get(BookkeeperConfig.BOOKKEEPER_TEMP_DIR_KEY))) { tmpDirPath = getTmpDirPath(configs, BookkeeperConfig.BOOKKEEPER_TEMP_DIR_KEY); } + + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY))) { + zookeeperHost = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY); + } + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zookeeperPort = Integer.parseInt(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + } } private void build() throws Exception { diff --git a/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperConfig.java 
b/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperConfig.java index 42685602..37f1ac67 100644 --- a/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperConfig.java +++ b/hadoop-unit-bookkeeper/src/main/java/fr/jetoile/hadoopunit/component/BookkeeperConfig.java @@ -21,5 +21,7 @@ public class BookkeeperConfig { public static final String BOOKKEEPER_HTTP_PORT_KEY = "bookkeeper.http.port"; public static final String BOOKKEEPER_TEMP_DIR_KEY = "bookkeeper.temp.dir"; + public static final String BOOKKEEPER_IP_CLIENT_KEY = "bookkeeper.client.ip"; + private BookkeeperConfig() {} } diff --git a/hadoop-unit-bookkeeper/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-bookkeeper/src/main/resources/hadoop-unit-default.properties index 913adf6f..61acc715 100644 --- a/hadoop-unit-bookkeeper/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-bookkeeper/src/main/resources/hadoop-unit-default.properties @@ -6,9 +6,12 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # BookKeeper bookkeeper.ip=127.0.0.1 bookkeeper.port=31810 bookkeeper.http.port=31900 -bookkeeper.temp.dir=/bookeeper \ No newline at end of file +bookkeeper.temp.dir=/bookeeper + +bookkeeper.client.ip=127.0.0.1 diff --git a/hadoop-unit-bookkeeper/src/test/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrapTest.java b/hadoop-unit-bookkeeper/src/test/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrapTest.java index df9d1d9c..b324cec3 100644 --- a/hadoop-unit-bookkeeper/src/test/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrapTest.java +++ b/hadoop-unit-bookkeeper/src/test/java/fr/jetoile/hadoopunit/component/BookkeeperBootstrapTest.java @@ -61,7 +61,7 @@ public static void tearDown() throws BootstrapException { public void bookkeeperShouldStart() throws NotFoundServiceException { Client client = ClientBuilder.newClient(); - String uri 
= "http://localhost:" + configuration.getInt(BookkeeperConfig.BOOKKEEPER_HTTP_PORT_KEY); + String uri = "http://" + configuration.getString(BookkeeperConfig.BOOKKEEPER_IP_CLIENT_KEY) + ":" + configuration.getInt(BookkeeperConfig.BOOKKEEPER_HTTP_PORT_KEY); Response hearbeatResponse = client.target(uri + "/heartbeat").request().get(); assertThat(hearbeatResponse.getStatus()).isEqualTo(200); diff --git a/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraBootstrap.java b/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraBootstrap.java index a6597152..6d7510d7 100644 --- a/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraBootstrap.java +++ b/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraBootstrap.java @@ -39,8 +39,11 @@ public class CassandraBootstrap implements Bootstrap { private Configuration configuration; private CassandraShutDownHook shutdownHook; private int port; - private String ip; + private String listenAddressIp; private String tmpDirPath; + private String rpcAddressIp; + private String broadcastAddressIp; + private String broadcastRpcAddressIp; public CassandraBootstrap() { try { @@ -67,13 +70,19 @@ public ComponentMetadata getMetadata() { @Override public String getProperties() { - return "\n \t\t\t ip:" + ip + + return "\n \t\t\t listenAddressIp:" + listenAddressIp + + "\n \t\t\t rpcAddressIp:" + rpcAddressIp + + "\n \t\t\t broadcastAddressIp:" + broadcastAddressIp + + "\n \t\t\t broadcastRpcAddressIp:" + broadcastRpcAddressIp + "\n \t\t\t port:" + port; } private void loadConfig() { port = configuration.getInt(CassandraConfig.CASSANDRA_PORT_KEY); - ip = configuration.getString(CassandraConfig.CASSANDRA_IP_KEY); + listenAddressIp = configuration.getString(CassandraConfig.CASSANDRA_LISTEN_ADDRESS_IP_KEY); + rpcAddressIp = configuration.getString(CassandraConfig.CASSANDRA_RPC_ADDRESS_IP_KEY); + broadcastAddressIp = 
configuration.getString(CassandraConfig.CASSANDRA_BROADCAST_ADDRESS_IP_KEY); + broadcastRpcAddressIp = configuration.getString(CassandraConfig.CASSANDRA_BROADCAST_RPC_ADDRESS_IP_KEY); tmpDirPath = getTmpDirPath(configuration, CassandraConfig.CASSANDRA_TEMP_DIR_KEY); } @@ -82,8 +91,17 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_PORT_KEY))) { port = Integer.parseInt(configs.get(CassandraConfig.CASSANDRA_PORT_KEY)); } - if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_IP_KEY))) { - ip = configs.get(CassandraConfig.CASSANDRA_IP_KEY); + if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_LISTEN_ADDRESS_IP_KEY))) { + listenAddressIp = configs.get(CassandraConfig.CASSANDRA_LISTEN_ADDRESS_IP_KEY); + } + if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_RPC_ADDRESS_IP_KEY))) { + rpcAddressIp = configs.get(CassandraConfig.CASSANDRA_RPC_ADDRESS_IP_KEY); + } + if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_BROADCAST_ADDRESS_IP_KEY))) { + broadcastAddressIp = configs.get(CassandraConfig.CASSANDRA_BROADCAST_ADDRESS_IP_KEY); + } + if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_BROADCAST_RPC_ADDRESS_IP_KEY))) { + broadcastRpcAddressIp = configs.get(CassandraConfig.CASSANDRA_BROADCAST_RPC_ADDRESS_IP_KEY); } if (StringUtils.isNotEmpty(configs.get(CassandraConfig.CASSANDRA_TEMP_DIR_KEY))) { tmpDirPath = getTmpDirPath(configs, CassandraConfig.CASSANDRA_TEMP_DIR_KEY); @@ -100,10 +118,10 @@ private void build() throws IOException { shutdownHook = new CassandraShutDownHook(); session = CassandraEmbeddedServerBuilder.builder() - .withListenAddress(ip) - .withRpcAddress(ip) - .withBroadcastAddress(ip) - .withBroadcastRpcAddress(ip) + .withListenAddress(listenAddressIp) + .withRpcAddress(rpcAddressIp) + .withBroadcastAddress(broadcastAddressIp) + .withBroadcastRpcAddress(broadcastRpcAddressIp) .withCQLPort(port) .withDataFolder(tmpDirPath + "/data") 
.withCommitLogFolder(tmpDirPath + "/commitlog") diff --git a/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraConfig.java b/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraConfig.java index ddac24ee..dd07f1e0 100644 --- a/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraConfig.java +++ b/hadoop-unit-cassandra/src/main/java/fr/jetoile/hadoopunit/component/CassandraConfig.java @@ -16,9 +16,14 @@ public class CassandraConfig { // Cassandra - public static final String CASSANDRA_IP_KEY = "cassandra.ip"; + public static final String CASSANDRA_LISTEN_ADDRESS_IP_KEY = "cassandra.listen.address.ip"; + public static final String CASSANDRA_RPC_ADDRESS_IP_KEY = "cassandra.rpc.address.ip"; + public static final String CASSANDRA_BROADCAST_ADDRESS_IP_KEY = "cassandra.broadcast.address.ip"; + public static final String CASSANDRA_BROADCAST_RPC_ADDRESS_IP_KEY = "cassandra.broadcast.rpc.address.ip"; public static final String CASSANDRA_PORT_KEY = "cassandra.port"; public static final String CASSANDRA_TEMP_DIR_KEY = "cassandra.temp.dir"; + public static final String CASSANDRA_LISTEN_ADDRESS_IP_CLIENT_KEY = "cassandra.listen.address.client.ip"; + private CassandraConfig() {} } diff --git a/hadoop-unit-cassandra/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-cassandra/src/main/resources/hadoop-unit-default.properties index 42fcd366..5de20255 100644 --- a/hadoop-unit-cassandra/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-cassandra/src/main/resources/hadoop-unit-default.properties @@ -114,10 +114,15 @@ mongo.database.name=test_database mongo.collection.name=test_collection # Cassandra -cassandra.ip=127.0.0.1 +cassandra.listen.address.ip=127.0.0.1 +cassandra.rpc.address.ip=0.0.0.0 +cassandra.broadcast.address.ip=127.0.0.1 +cassandra.broadcast.rpc.address.ip=127.0.0.1 cassandra.port=13433 cassandra.temp.dir=/embedded_cassandra 
+cassandra.listen.address.client.ip=127.0.0.1 + # ElasticSearch elasticsearch.version=5.4.3 elasticsearch.ip=127.0.0.1 diff --git a/hadoop-unit-cassandra/src/test/java/fr/jetoile/hadoopunit/component/CassandraBootstrapTest.java b/hadoop-unit-cassandra/src/test/java/fr/jetoile/hadoopunit/component/CassandraBootstrapTest.java index 18758b24..f3786eb1 100644 --- a/hadoop-unit-cassandra/src/test/java/fr/jetoile/hadoopunit/component/CassandraBootstrapTest.java +++ b/hadoop-unit-cassandra/src/test/java/fr/jetoile/hadoopunit/component/CassandraBootstrapTest.java @@ -92,7 +92,7 @@ public void cassandraShouldStart() throws NotFoundServiceException { @Test public void cassandraShouldStartWithRealDriver() throws NotFoundServiceException { Cluster cluster = Cluster.builder() - .addContactPoints(configuration.getString(CassandraConfig.CASSANDRA_IP_KEY)).withPort(configuration.getInt(CassandraConfig.CASSANDRA_PORT_KEY)).build(); + .addContactPoints(configuration.getString(CassandraConfig.CASSANDRA_LISTEN_ADDRESS_IP_CLIENT_KEY)).withPort(configuration.getInt(CassandraConfig.CASSANDRA_PORT_KEY)).build(); Session session = cluster.connect(); session.execute("insert into test.test(user, value) values('user2', 'value2')"); diff --git a/hadoop-unit-cassandra/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-cassandra/src/test/resources/hadoop-unit-default.properties index 42fcd366..5de20255 100644 --- a/hadoop-unit-cassandra/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-cassandra/src/test/resources/hadoop-unit-default.properties @@ -114,10 +114,15 @@ mongo.database.name=test_database mongo.collection.name=test_collection # Cassandra -cassandra.ip=127.0.0.1 +cassandra.listen.address.ip=127.0.0.1 +cassandra.rpc.address.ip=0.0.0.0 +cassandra.broadcast.address.ip=127.0.0.1 +cassandra.broadcast.rpc.address.ip=127.0.0.1 cassandra.port=13433 cassandra.temp.dir=/embedded_cassandra +cassandra.listen.address.client.ip=127.0.0.1 + # ElasticSearch 
elasticsearch.version=5.4.3 elasticsearch.ip=127.0.0.1 diff --git a/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java b/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java index 9c0a8ae7..8d6210f4 100644 --- a/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java +++ b/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java @@ -32,5 +32,10 @@ public class ConfluentConfig { public static final String CONFLUENT_KSQL_HOST_KEY = "confluent.ksql.host"; public static final String CONFLUENT_KSQL_PORT_KEY = "confluent.ksql.port"; + public static final String CONFLUENT_SCHEMAREGISTRY_HOST_CLIENT_KEY = "confluent.schemaregistry.client.host"; + public static final String CONFLUENT_KAFKA_HOST_CLIENT_KEY = "confluent.kafka.client.host"; + public static final String CONFLUENT_KSQL_HOST_CLIENT_KEY = "confluent.ksql.client.host"; + public static final String CONFLUENT_REST_HOST_CLIENT_KEY = "confluent.rest.client.host"; + private ConfluentConfig() {} } diff --git a/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaRestBootstrap.java b/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaRestBootstrap.java index 755cf996..593734d8 100644 --- a/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaRestBootstrap.java +++ b/hadoop-unit-confluent-rest/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaRestBootstrap.java @@ -71,18 +71,18 @@ public String getProperties() { } public void loadConfig() { - restConfig.put("schema.registry.url", configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY)); - restConfig.put("zookeeper.connect", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + 
configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + restConfig.put("schema.registry.url", configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_CLIENT_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY)); + restConfig.put("zookeeper.connect", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); restConfig.put("listeners", "http://" + configuration.getString(ConfluentConfig.CONFLUENT_REST_HOST_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_REST_PORT_KEY)); } @Override public void loadConfig(Map configs) { - if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY))) { - restConfig.put("schema.registry.url", configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY))) { + restConfig.put("schema.registry.url", configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_CLIENT_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY)); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - restConfig.put("zookeeper.connect", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + restConfig.put("zookeeper.connect", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + 
configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); } if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_REST_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_REST_PORT_KEY))) { restConfig.put("listeners", "http://" + configs.get(ConfluentConfig.CONFLUENT_REST_HOST_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_REST_PORT_KEY)); diff --git a/hadoop-unit-confluent-rest/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-confluent-rest/src/main/resources/hadoop-unit-default.properties index b55a6dfd..156b099d 100644 --- a/hadoop-unit-confluent-rest/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-confluent-rest/src/main/resources/hadoop-unit-default.properties @@ -7,6 +7,8 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir @@ -192,4 +194,9 @@ confluent.rest.host=127.0.0.1 confluent.rest.port=8082 confluent.ksql.host=127.0.0.1 -confluent.ksql.port=8083 \ No newline at end of file +confluent.ksql.port=8083 + +confluent.schemaregistry.client.host=127.0.0.1 +confluent.kafka.client.host=127.0.0.1 +confluent.rest.client.host=127.0.0.1 +confluent.ksql.client.host=127.0.0.1 \ No newline at end of file diff --git a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java index 740454bf..fc1eeb73 100644 --- a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java +++ b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentConfig.java @@ -29,5 +29,9 @@ public class ConfluentConfig { public static final String CONFLUENT_KSQL_HOST_KEY = "confluent.ksql.host"; public static final String CONFLUENT_KSQL_PORT_KEY = "confluent.ksql.port"; + public static final String 
CONFLUENT_SCHEMAREGISTRY_HOST_CLIENT_KEY = "confluent.schemaregistry.client.host"; + public static final String CONFLUENT_KAFKA_HOST_CLIENT_KEY = "confluent.kafka.client.host"; + public static final String CONFLUENT_KSQL_HOST_CLIENT_KEY = "confluent.ksql.client.host"; + private ConfluentConfig() {} } diff --git a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaBootstrap.java b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaBootstrap.java index ba64d024..ac6ba149 100644 --- a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaBootstrap.java +++ b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKafkaBootstrap.java @@ -15,7 +15,6 @@ package fr.jetoile.hadoopunit.component; import fr.jetoile.hadoopunit.ComponentMetadata; -import fr.jetoile.hadoopunit.HadoopUnitConfig; import fr.jetoile.hadoopunit.HadoopUtils; import fr.jetoile.hadoopunit.exception.BootstrapException; import kafka.server.KafkaConfig; @@ -78,10 +77,10 @@ public String getProperties() { } public void loadConfig() { - kafkaConfig.put("zookeeper.connect", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + kafkaConfig.put("zookeeper.connect", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); kafkaConfig.put("log.dirs", getTmpDirPath(configuration, ConfluentConfig.CONFLUENT_KAFKA_LOG_DIR_KEY)); kafkaConfig.put("broker.id", configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_BROKER_ID_KEY)); -// kafkaConfig.put("advertised.listeners", "PLAINTEXT://localhost:22222"); + kafkaConfig.put("advertised.listeners", "PLAINTEXT://" + configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_HOST_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); kafkaConfig.put("advertised.host.name", configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_HOST_KEY)); 
kafkaConfig.put("port", configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); kafkaConfig.put("confluent.support.metrics.enable", "false"); @@ -91,8 +90,8 @@ public void loadConfig() { @Override public void loadConfig(Map configs) { - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - kafkaConfig.put("zookeeper.connect", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + kafkaConfig.put("zookeeper.connect", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); } if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_KAFKA_LOG_DIR_KEY))) { kafkaConfig.put("log.dirs", getTmpDirPath(configs, ConfluentConfig.CONFLUENT_KAFKA_LOG_DIR_KEY)); diff --git a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKsqlRestBootstrap.java b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKsqlRestBootstrap.java index 205d2be4..f5e16e61 100644 --- a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKsqlRestBootstrap.java +++ b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentKsqlRestBootstrap.java @@ -80,7 +80,7 @@ public String getProperties() { public void loadConfig() { ksqlConfig.put(KsqlRestConfig.LISTENERS_CONFIG, "http://" + configuration.getString(ConfluentConfig.CONFLUENT_KSQL_HOST_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_KSQL_PORT_KEY)); // props.put(KsqlRestConfig.PORT_CONFIG, String.valueOf(portNumber)); - ksqlConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_HOST_KEY) + ":" + 
configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); + ksqlConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_HOST_CLIENT_KEY) + ":" + configuration.getString(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); ksqlConfig.put(StreamsConfig.APPLICATION_ID_CONFIG, "ksql_config_test"); // ksqlConfig.put(KsqlRestConfig.COMMAND_TOPIC_SUFFIX, "commands"); } @@ -91,7 +91,7 @@ public void loadConfig(Map configs) { ksqlConfig.put(KsqlRestConfig.LISTENERS_CONFIG, "http://" + configs.get(ConfluentConfig.CONFLUENT_KSQL_HOST_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_KSQL_PORT_KEY)); } if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_KAFKA_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY))) { - ksqlConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, configs.get(ConfluentConfig.CONFLUENT_KAFKA_HOST_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); + ksqlConfig.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, configs.get(ConfluentConfig.CONFLUENT_KAFKA_HOST_CLIENT_KEY) + ":" + configs.get(ConfluentConfig.CONFLUENT_KAFKA_PORT_KEY)); } } diff --git a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentSchemaRegistryBootstrap.java b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentSchemaRegistryBootstrap.java index 40b808ab..7f6a2750 100644 --- a/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentSchemaRegistryBootstrap.java +++ b/hadoop-unit-confluent/src/main/java/fr/jetoile/hadoopunit/component/ConfluentSchemaRegistryBootstrap.java @@ -76,7 +76,7 @@ public void loadConfig() { schemaRegistryConfig.put("debug", configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_DEBUG_KEY)); schemaRegistryConfig.put("listeners", "http://" + configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_HOST_KEY) + ":" + 
configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_PORT_KEY)); schemaRegistryConfig.put("kafkastore.topic", configuration.getString(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_TOPIC_KEY)); - schemaRegistryConfig.put("kafkastore.connection.url", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + schemaRegistryConfig.put("kafkastore.connection.url", configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getString(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); } @Override @@ -90,8 +90,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_TOPIC_KEY))) { schemaRegistryConfig.put("kafkastore.topic", configs.get(ConfluentConfig.CONFLUENT_SCHEMAREGISTRY_TOPIC_KEY)); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - schemaRegistryConfig.put("kafkastore.connection.url", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + schemaRegistryConfig.put("kafkastore.connection.url", configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); } } diff --git a/hadoop-unit-confluent/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-confluent/src/main/resources/hadoop-unit-default.properties index b55a6dfd..ad686854 100644 --- a/hadoop-unit-confluent/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-confluent/src/main/resources/hadoop-unit-default.properties @@ -6,7 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 - 
+zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir @@ -192,4 +192,9 @@ confluent.rest.host=127.0.0.1 confluent.rest.port=8082 confluent.ksql.host=127.0.0.1 -confluent.ksql.port=8083 \ No newline at end of file +confluent.ksql.port=8083 + +confluent.schemaregistry.client.host=127.0.0.1 +confluent.kafka.client.host=127.0.0.1 +confluent.rest.client.host=127.0.0.1 +confluent.ksql.client.host=127.0.0.1 diff --git a/hadoop-unit-hbase/src/main/java/fr/jetoile/hadoopunit/component/HBaseBootstrap.java b/hadoop-unit-hbase/src/main/java/fr/jetoile/hadoopunit/component/HBaseBootstrap.java index 91fdb4a3..ab190063 100644 --- a/hadoop-unit-hbase/src/main/java/fr/jetoile/hadoopunit/component/HBaseBootstrap.java +++ b/hadoop-unit-hbase/src/main/java/fr/jetoile/hadoopunit/component/HBaseBootstrap.java @@ -121,7 +121,7 @@ private void loadConfig() throws BootstrapException { infoPort = configuration.getInt(HBaseConfig.HBASE_MASTER_INFO_PORT_KEY); nbRegionServer = configuration.getInt(HBaseConfig.HBASE_NUM_REGION_SERVERS_KEY); rootDirectory = configuration.getString(HBaseConfig.HBASE_ROOT_DIR_KEY); - zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); zookeeperPort = configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); zookeeperZnodeParent = configuration.getString(HBaseConfig.HBASE_ZNODE_PARENT_KEY); enableWalReplication = configuration.getBoolean(HBaseConfig.HBASE_WAL_REPLICATION_ENABLED_KEY); @@ -132,7 +132,7 @@ private void loadConfig() throws BootstrapException { restReadOnly = configuration.getBoolean(HBaseConfig.HBASE_REST_READONLY_KEY); restMaxThread = configuration.getInt(HBaseConfig.HBASE_REST_THREADMAX_KEY); restMinThread = 
configuration.getInt(HBaseConfig.HBASE_REST_THREADMIN_KEY); - hdfsUri = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); + hdfsUri = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); } @Override @@ -149,8 +149,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(HBaseConfig.HBASE_ROOT_DIR_KEY))) { rootDirectory = configs.get(HBaseConfig.HBASE_ROOT_DIR_KEY); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { zookeeperPort = Integer.parseInt(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY)); @@ -180,8 +180,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(HBaseConfig.HBASE_REST_THREADMIN_KEY))) { restMinThread = Integer.parseInt(configs.get(HBaseConfig.HBASE_REST_THREADMIN_KEY)); } - if (StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY))) { - hdfsUri = "hdfs://" + configs.get(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + Integer.parseInt(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY)) && 
StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY))) { + hdfsUri = "hdfs://" + configs.get(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + Integer.parseInt(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); } } diff --git a/hadoop-unit-hbase/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hbase/src/main/resources/hadoop-unit-default.properties index fb3a4870..8f604c46 100644 --- a/hadoop-unit-hbase/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hbase/src/main/resources/hadoop-unit-default.properties @@ -6,6 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir @@ -39,6 +40,11 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=localhost +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + # HBase hbase.master.port=25111 @@ -56,6 +62,8 @@ hbase.rest.host=0.0.0.0 hbase.rest.threads.max=100 hbase.rest.threads.min=2 +hbase.rest.client.host=127.0.0.1 + # HBase Test hbase.test.table.name=hbase_test_table hbase.test.col.family.name=cf1 diff --git a/hadoop-unit-hbase/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-hbase/src/test/resources/hadoop-unit-default.properties index fb3a4870..736cd475 100644 --- a/hadoop-unit-hbase/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hbase/src/test/resources/hadoop-unit-default.properties @@ -6,6 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir @@ -39,6 +40,11 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=localhost 
+hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + # HBase hbase.master.port=25111 diff --git a/hadoop-unit-hdfs/src/main/java/fr/jetoile/hadoopunit/component/HdfsConfig.java b/hadoop-unit-hdfs/src/main/java/fr/jetoile/hadoopunit/component/HdfsConfig.java index 15dcdfc8..e74bd11a 100644 --- a/hadoop-unit-hdfs/src/main/java/fr/jetoile/hadoopunit/component/HdfsConfig.java +++ b/hadoop-unit-hdfs/src/main/java/fr/jetoile/hadoopunit/component/HdfsConfig.java @@ -34,5 +34,10 @@ public class HdfsConfig { public static final String HDFS_TEST_FILE_KEY = "hdfs.test.file"; public static final String HDFS_TEST_STRING_KEY = "hdfs.test.string"; + public static final String HDFS_NAMENODE_HOST_CLIENT_KEY = "hdfs.namenode.client.host"; + public static final String HDFS_DATANODE_ADDRESS_CLIENT_KEY = "hdfs.datanode.client.address"; + public static final String HDFS_DATANODE_HTTP_ADDRESS_CLIENT_KEY = "hdfs.datanode.http.client.address"; + public static final String HDFS_DATANODE_IPC_ADDRESS_CLIENT_KEY = "hdfs.datanode.ipc.client.address"; + private HdfsConfig() {} } diff --git a/hadoop-unit-hdfs/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hdfs/src/main/resources/hadoop-unit-default.properties index 2b55643c..7c171c9e 100644 --- a/hadoop-unit-hdfs/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hdfs/src/main/resources/hadoop-unit-default.properties @@ -42,6 +42,10 @@ hdfs.datanode.ipc.address=127.0.0.1:50020 hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=localhost +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 diff --git a/hadoop-unit-hdfs/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-hdfs/src/test/resources/hadoop-unit-default.properties index c0077bee..2cd08ae5 100644 
--- a/hadoop-unit-hdfs/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hdfs/src/test/resources/hadoop-unit-default.properties @@ -39,6 +39,10 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=localhost +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 diff --git a/hadoop-unit-hdfs3/src/main/java/fr/jetoile/hadoopunit/component/Hdfs3Config.java b/hadoop-unit-hdfs3/src/main/java/fr/jetoile/hadoopunit/component/Hdfs3Config.java index 3333cf72..00601155 100644 --- a/hadoop-unit-hdfs3/src/main/java/fr/jetoile/hadoopunit/component/Hdfs3Config.java +++ b/hadoop-unit-hdfs3/src/main/java/fr/jetoile/hadoopunit/component/Hdfs3Config.java @@ -34,5 +34,10 @@ public class Hdfs3Config { public static final String HDFS3_TEST_FILE_KEY = "hdfs3.test.file"; public static final String HDFS3_TEST_STRING_KEY = "hdfs3.test.string"; + public static final String HDFS3_NAMENODE_HOST_CLIENT_KEY = "hdfs3.namenode.client.host"; + public static final String HDFS3_DATANODE_ADDRESS_CLIENT_KEY = "hdfs3.datanode.client.address"; + public static final String HDFS3_DATANODE_HTTP_ADDRESS__CLIENT_KEY = "hdfs3.datanode.http.client.address"; + public static final String HDFS3_DATANODE_IPC_ADDRESS_CLIENT_KEY = "hdfs3.datanode.ipc.client.address"; + private Hdfs3Config() {} } diff --git a/hadoop-unit-hdfs3/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hdfs3/src/main/resources/hadoop-unit-default.properties index a0f78ff3..cf15aa81 100644 --- a/hadoop-unit-hdfs3/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hdfs3/src/main/resources/hadoop-unit-default.properties @@ -17,3 +17,7 @@ hdfs3.datanode.ipc.address=127.0.0.1:50020 hdfs3.test.file=/tmp/testing hdfs3.test.string=TESTING +hdfs3.namenode.client.host=127.0.0.1 
+hdfs3.datanode.client.address=127.0.0.1:50010 +hdfs3.datanode.http.client.address=127.0.0.1:50075 +hdfs3.datanode.ipc.client.address=127.0.0.1:50020 \ No newline at end of file diff --git a/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveConfig.java b/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveConfig.java index 04470097..ddd3f7d9 100644 --- a/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveConfig.java +++ b/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveConfig.java @@ -32,5 +32,9 @@ public class HiveConfig { public static final String HIVE_TEST_DATABASE_NAME_KEY = "hive.test.database.name"; public static final String HIVE_TEST_TABLE_NAME_KEY = "hive.test.table.name"; + + public static final String HIVE_METASTORE_HOSTNAME_CLIENT_KEY = "hive.metastore.client.hostname"; + public static final String HIVE_SERVER2_HOSTNAME_CLIENT_KEY = "hive.server2.client.hostname"; + private HiveConfig() {} } diff --git a/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveServer2Bootstrap.java b/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveServer2Bootstrap.java index b317dcde..91f22cec 100644 --- a/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveServer2Bootstrap.java +++ b/hadoop-unit-hive/src/main/java/fr/jetoile/hadoopunit/component/HiveServer2Bootstrap.java @@ -18,10 +18,7 @@ import com.github.sakserv.minicluster.util.WindowsLibsUtils; import fr.jetoile.hadoopunit.*; import fr.jetoile.hadoopunit.exception.BootstrapException; -import fr.jetoile.hadoopunit.exception.NotFoundServiceException; import org.apache.commons.configuration.Configuration; -import org.apache.commons.configuration.ConfigurationException; -import org.apache.commons.configuration.PropertiesConfiguration; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.slf4j.Logger; @@ -84,13 +81,13 @@ public String getProperties() { private 
void loadConfig() throws BootstrapException { host = configuration.getString(HiveConfig.HIVE_SERVER2_HOSTNAME_KEY); port = configuration.getInt(HiveConfig.HIVE_SERVER2_PORT_KEY); - hostMetastore = configuration.getString(HiveConfig.HIVE_METASTORE_HOSTNAME_KEY); + hostMetastore = configuration.getString(HiveConfig.HIVE_METASTORE_HOSTNAME_CLIENT_KEY); portMetastore = configuration.getInt(HiveConfig.HIVE_METASTORE_PORT_KEY); derbyDirectory = configuration.getString(HiveConfig.HIVE_METASTORE_DERBY_DB_DIR_KEY); scratchDirectory = getTmpDirPath(configuration, HiveConfig.HIVE_SCRATCH_DIR_KEY); warehouseDirectory = getTmpDirPath(configuration, HiveConfig.HIVE_WAREHOUSE_DIR_KEY); - zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); - hdfsUri = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); + zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + hdfsUri = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); } @Override @@ -101,8 +98,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(HiveConfig.HIVE_SERVER2_PORT_KEY))) { port = Integer.parseInt(configs.get(HiveConfig.HIVE_SERVER2_PORT_KEY)); } - if (StringUtils.isNotEmpty(configs.get(HiveConfig.HIVE_METASTORE_HOSTNAME_KEY))) { - hostMetastore = configs.get(HiveConfig.HIVE_METASTORE_HOSTNAME_KEY); + if (StringUtils.isNotEmpty(configs.get(HiveConfig.HIVE_METASTORE_HOSTNAME_CLIENT_KEY))) { + hostMetastore = configs.get(HiveConfig.HIVE_METASTORE_HOSTNAME_CLIENT_KEY); } if (StringUtils.isNotEmpty(configs.get(HiveConfig.HIVE_METASTORE_PORT_KEY))) { portMetastore = 
Integer.parseInt(configs.get(HiveConfig.HIVE_METASTORE_PORT_KEY)); @@ -116,11 +113,11 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(HiveConfig.HIVE_WAREHOUSE_DIR_KEY))) { warehouseDirectory = configs.get(HiveConfig.HIVE_WAREHOUSE_DIR_KEY); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } - if (StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY))) { - hdfsUri = "hdfs://" + configs.get(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + Integer.parseInt(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY))) { + hdfsUri = "hdfs://" + configs.get(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + Integer.parseInt(configs.get(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); } } diff --git a/hadoop-unit-hive/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hive/src/main/resources/hadoop-unit-default.properties index c0077bee..bd31994e 100644 --- a/hadoop-unit-hive/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hive/src/main/resources/hadoop-unit-default.properties @@ -7,6 +7,8 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + # Hive hive.scratch.dir=/hive_scratch_dir 
hive.warehouse.dir=/tmp/warehouse_dir @@ -16,10 +18,14 @@ hive.metastore.hostname=localhost hive.metastore.port=20102 hive.metastore.derby.db.dir=/metastore_db +hive.metastore.client.hostname=localhost + # Hive Server2 hive.server2.hostname=localhost hive.server2.port=20103 +hive.server2.client.hostname=localhost + # Hive Test hive.test.database.name=default hive.test.table.name=test_table diff --git a/hadoop-unit-hive/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-hive/src/test/resources/hadoop-unit-default.properties index c0077bee..c73881e7 100644 --- a/hadoop-unit-hive/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hive/src/test/resources/hadoop-unit-default.properties @@ -6,6 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir @@ -16,10 +17,14 @@ hive.metastore.hostname=localhost hive.metastore.port=20102 hive.metastore.derby.db.dir=/metastore_db +hive.metastore.client.hostname=localhost + # Hive Server2 hive.server2.hostname=localhost hive.server2.port=20103 +hive.server2.client.hostname=localhost + # Hive Test hive.test.database.name=default hive.test.table.name=test_table diff --git a/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java b/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java index 87e84c1e..0dd5d9f6 100644 --- a/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java +++ b/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java @@ -32,5 +32,8 @@ public class Hive3Config { public static final String HIVE3_TEST_DATABASE_NAME_KEY = "hive3.test.database.name"; public static final String HIVE3_TEST_TABLE_NAME_KEY = "hive3.test.table.name"; + public static final String HIVE3_METASTORE_HOSTNAME_CLIENT_KEY = "hive3.metastore.client.hostname"; + + private Hive3Config() 
{} } diff --git a/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/HiveMetastore3Bootstrap.java b/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/HiveMetastore3Bootstrap.java index 2c44d6f1..806d892e 100644 --- a/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/HiveMetastore3Bootstrap.java +++ b/hadoop-unit-hivemeta3/src/main/java/fr/jetoile/hadoopunit/component/HiveMetastore3Bootstrap.java @@ -123,7 +123,7 @@ private HiveConf buildHiveConf() { WindowsLibsUtils.setHadoopHome(); HiveConf hiveConf = new HiveConf(); - hiveConf.set("fs.defaultFS", "hdfs://" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_HOST_KEY) + ":" + configuration.getInt(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY)); + hiveConf.set("fs.defaultFS", "hdfs://" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getInt(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY)); // hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true"); // hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5"); diff --git a/hadoop-unit-hivemeta3/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hivemeta3/src/main/resources/hadoop-unit-default.properties index f30aa7da..fc9fd90a 100644 --- a/hadoop-unit-hivemeta3/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hivemeta3/src/main/resources/hadoop-unit-default.properties @@ -16,10 +16,14 @@ hive3.metastore.hostname=localhost hive3.metastore.port=20102 hive3.metastore.derby.db.dir=/metastore_db +hive3.metastore.client.hostname=localhost + # Hive Server2 hive3.server2.hostname=localhost hive3.server2.port=20103 +hive3.server2.client.hostname=localhost + # Hive Test hive3.test.database.name=default hive3.test.table.name=test_table @@ -39,6 +43,7 @@ hdfs3.enable.running.user.as.proxy.user=true 
hdfs3.test.file=/tmp/testing hdfs3.test.string=TESTING +hdfs3.namenode.client.host=localhost # HBase hbase.master.port=25111 diff --git a/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java b/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java index 87e84c1e..057ac32e 100644 --- a/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java +++ b/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/Hive3Config.java @@ -32,5 +32,8 @@ public class Hive3Config { public static final String HIVE3_TEST_DATABASE_NAME_KEY = "hive3.test.database.name"; public static final String HIVE3_TEST_TABLE_NAME_KEY = "hive3.test.table.name"; + public static final String HIVE3_METASTORE_HOSTNAME_CLIENT_KEY = "hive3.metastore.client.hostname"; + public static final String HIVE3_SERVER2_HOSTNAME_CLIENT_KEY = "hive3.server2.client.hostname"; + private Hive3Config() {} } diff --git a/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/HiveServer23Bootstrap.java b/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/HiveServer23Bootstrap.java index c8b532fa..efea0e49 100644 --- a/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/HiveServer23Bootstrap.java +++ b/hadoop-unit-hiveserver23/src/main/java/fr/jetoile/hadoopunit/component/HiveServer23Bootstrap.java @@ -80,12 +80,12 @@ public String getProperties() { private void loadConfig() throws BootstrapException { host = configuration.getString(Hive3Config.HIVE3_SERVER2_HOSTNAME_KEY); port = configuration.getInt(Hive3Config.HIVE3_SERVER2_PORT_KEY); - hostMetastore = configuration.getString(Hive3Config.HIVE3_METASTORE_HOSTNAME_KEY); + hostMetastore = configuration.getString(Hive3Config.HIVE3_METASTORE_HOSTNAME_CLIENT_KEY); portMetastore = configuration.getInt(Hive3Config.HIVE3_METASTORE_PORT_KEY); scratchDirectory = getTmpDirPath(configuration, 
Hive3Config.HIVE3_SCRATCH_DIR_KEY); warehouseDirectory = configuration.getString(Hive3Config.HIVE3_WAREHOUSE_DIR_KEY); - zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); - hdfsUri = "hdfs://" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_HOST_KEY) + ":" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY); + zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + hdfsUri = "hdfs://" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY); } @Override @@ -96,8 +96,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(Hive3Config.HIVE3_SERVER2_PORT_KEY))) { port = Integer.parseInt(configs.get(Hive3Config.HIVE3_SERVER2_PORT_KEY)); } - if (StringUtils.isNotEmpty(configs.get(Hive3Config.HIVE3_METASTORE_HOSTNAME_KEY))) { - hostMetastore = configs.get(Hive3Config.HIVE3_METASTORE_HOSTNAME_KEY); + if (StringUtils.isNotEmpty(configs.get(Hive3Config.HIVE3_METASTORE_HOSTNAME_CLIENT_KEY))) { + hostMetastore = configs.get(Hive3Config.HIVE3_METASTORE_HOSTNAME_CLIENT_KEY); } if (StringUtils.isNotEmpty(configs.get(Hive3Config.HIVE3_METASTORE_PORT_KEY))) { portMetastore = Integer.parseInt(configs.get(Hive3Config.HIVE3_METASTORE_PORT_KEY)); @@ -108,11 +108,11 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(Hive3Config.HIVE3_WAREHOUSE_DIR_KEY))) { warehouseDirectory = configs.get(Hive3Config.HIVE3_WAREHOUSE_DIR_KEY); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + if 
(StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } - if (StringUtils.isNotEmpty(configs.get(Hdfs3Config.HDFS3_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY))) { - hdfsUri = "hdfs://" + configs.get(Hdfs3Config.HDFS3_NAMENODE_HOST_KEY) + ":" + Integer.parseInt(configs.get(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY)); + if (StringUtils.isNotEmpty(configs.get(Hdfs3Config.HDFS3_NAMENODE_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY))) { + hdfsUri = "hdfs://" + configs.get(Hdfs3Config.HDFS3_NAMENODE_HOST_CLIENT_KEY) + ":" + Integer.parseInt(configs.get(Hdfs3Config.HDFS3_NAMENODE_PORT_KEY)); } } diff --git a/hadoop-unit-hiveserver23/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-hiveserver23/src/main/resources/hadoop-unit-default.properties index 874a4f7b..84cf8728 100644 --- a/hadoop-unit-hiveserver23/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-hiveserver23/src/main/resources/hadoop-unit-default.properties @@ -7,6 +7,8 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + # Hive hive3.scratch.dir=/hive_scratch_dir hive3.warehouse.dir=/tmp/warehouse_dir @@ -16,10 +18,14 @@ hive3.metastore.hostname=localhost hive3.metastore.port=20102 hive3.metastore.derby.db.dir=/metastore_db +hive3.metastore.client.hostname=localhost + # Hive Server2 hive3.server2.hostname=localhost hive3.server2.port=20103 +hive3.server2.client.hostname=localhost + # Hive Test hive3.test.database.name=default hive3.test.table.name=test_table @@ -39,6 +45,10 @@ hdfs3.enable.running.user.as.proxy.user=true hdfs3.test.file=/tmp/testing hdfs3.test.string=TESTING 
+hdfs3.namenode.client.host=127.0.0.1 +hdfs3.datanode.client.address=127.0.0.1:50010 +hdfs3.datanode.http.client.address=127.0.0.1:50075 +hdfs3.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 diff --git a/hadoop-unit-kafka/src/main/java/fr/jetoile/hadoopunit/component/KafkaBootstrap.java b/hadoop-unit-kafka/src/main/java/fr/jetoile/hadoopunit/component/KafkaBootstrap.java index 1a802de0..386d760a 100644 --- a/hadoop-unit-kafka/src/main/java/fr/jetoile/hadoopunit/component/KafkaBootstrap.java +++ b/hadoop-unit-kafka/src/main/java/fr/jetoile/hadoopunit/component/KafkaBootstrap.java @@ -93,6 +93,7 @@ private void build() { .build(); Properties kafkaProperties = kafkaLocalCluster.getKafkaProperties(); + kafkaProperties.put("advertised.listeners", "PLAINTEXT://" + ":" + port); kafkaProperties.put("default.replication.factor", "1"); kafkaProperties.put("offsets.topic.replication.factor", "1"); } @@ -102,7 +103,7 @@ private void loadConfig() { port = configuration.getInt(KafkaConfig.KAFKA_PORT_KEY); brokerId = configuration.getInt(KafkaConfig.KAFKA_TEST_BROKER_ID_KEY); tmpDirectory = getTmpDirPath(configuration, KafkaConfig.KAFKA_TEST_TEMP_DIR_KEY); - zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + zookeeperConnectionString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } @Override @@ -119,8 +120,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(KafkaConfig.KAFKA_TEST_TEMP_DIR_KEY))) { tmpDirectory = getTmpDirPath(configs, KafkaConfig.KAFKA_TEST_TEMP_DIR_KEY); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + 
configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zookeeperConnectionString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } } diff --git a/hadoop-unit-kafka/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-kafka/src/main/resources/hadoop-unit-default.properties index c0077bee..573d1972 100644 --- a/hadoop-unit-kafka/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-kafka/src/main/resources/hadoop-unit-default.properties @@ -7,6 +7,8 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir diff --git a/hadoop-unit-kafka/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-kafka/src/test/resources/hadoop-unit-default.properties index c0077bee..573d1972 100644 --- a/hadoop-unit-kafka/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-kafka/src/test/resources/hadoop-unit-default.properties @@ -7,6 +7,8 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir diff --git a/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxBootstrap.java b/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxBootstrap.java index ede36c38..fd154790 100644 --- a/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxBootstrap.java +++ b/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxBootstrap.java @@ -164,10 +164,10 @@ private void loadConfig() { List servicesList = Arrays.asList(configuration.getStringArray(KnoxConfig.KNOX_SERVICE_KEY)); services = 
Arrays.asList(KnoxService.values()).stream().filter(s -> servicesList.contains(s.getName())).collect(Collectors.toList()); - namenodeUri = "hdfs://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(KnoxConfig.HDFS_NAMENODE_PORT_KEY); - webHdfsUri = "http://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs"; - webHBaseUri = "http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_KEY) + ":" + configuration.getString(KnoxConfig.HBASE_REST_PORT_KEY); - oozieUri = "http://" + configuration.getString(KnoxConfig.OOZIE_HOST) + ":" + configuration.getString(KnoxConfig.OOZIE_PORT) + "/oozie"; + namenodeUri = "hdfs://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(KnoxConfig.HDFS_NAMENODE_PORT_KEY); + webHdfsUri = "http://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs"; + webHBaseUri = "http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_CLIENT_KEY) + ":" + configuration.getString(KnoxConfig.HBASE_REST_PORT_KEY); + oozieUri = "http://" + configuration.getString(KnoxConfig.OOZIE_CLIENT_HOST) + ":" + configuration.getString(KnoxConfig.OOZIE_PORT) + "/oozie"; } @@ -192,17 +192,17 @@ public void loadConfig(Map configs) { List servicesList = Arrays.asList(configuration.getStringArray(KnoxConfig.KNOX_SERVICE_KEY)); services = Arrays.asList(KnoxService.values()).stream().filter(s -> servicesList.contains(s.getName())).collect(Collectors.toList()); } - if (StringUtils.isNotEmpty(configs.get(KnoxConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HDFS_NAMENODE_PORT_KEY)) { - namenodeUri = "hdfs://" + configs.get(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configs.get(KnoxConfig.HDFS_NAMENODE_PORT_KEY); + if 
(StringUtils.isNotEmpty(configs.get(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HDFS_NAMENODE_PORT_KEY)) { + namenodeUri = "hdfs://" + configs.get(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configs.get(KnoxConfig.HDFS_NAMENODE_PORT_KEY); } - if (StringUtils.isNotEmpty(configs.get(KnoxConfig.HDFS_NAMENODE_HOST_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY)) { - webHdfsUri = "http://" + configs.get(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configs.get(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs"; + if (StringUtils.isNotEmpty(configs.get(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY)) { + webHdfsUri = "http://" + configs.get(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configs.get(KnoxConfig.HDFS_NAMENODE_HTTP_PORT_KEY) + "/webhdfs"; } - if (StringUtils.isNotEmpty(configs.get(KnoxConfig.HBASE_REST_HOST_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HBASE_REST_PORT_KEY)) { - webHBaseUri = "http://" + configs.get(KnoxConfig.HBASE_REST_HOST_KEY) + ":" + configs.get(KnoxConfig.HBASE_REST_PORT_KEY); + if (StringUtils.isNotEmpty(configs.get(KnoxConfig.HBASE_REST_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(KnoxConfig.HBASE_REST_PORT_KEY)) { + webHBaseUri = "http://" + configs.get(KnoxConfig.HBASE_REST_HOST_CLIENT_KEY) + ":" + configs.get(KnoxConfig.HBASE_REST_PORT_KEY); } - if (StringUtils.isNotEmpty(configs.get(KnoxConfig.OOZIE_HOST)) && StringUtils.isNotEmpty(KnoxConfig.OOZIE_PORT)) { - webHBaseUri = "http://" + configs.get(KnoxConfig.OOZIE_HOST) + ":" + configs.get(KnoxConfig.OOZIE_PORT); + if (StringUtils.isNotEmpty(configs.get(KnoxConfig.OOZIE_CLIENT_HOST)) && StringUtils.isNotEmpty(KnoxConfig.OOZIE_PORT)) { + webHBaseUri = "http://" + configs.get(KnoxConfig.OOZIE_CLIENT_HOST) + ":" + configs.get(KnoxConfig.OOZIE_PORT); } } diff --git a/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxConfig.java 
b/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxConfig.java index 21dd8139..f4d71727 100644 --- a/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxConfig.java +++ b/hadoop-unit-knox/src/main/java/fr/jetoile/hadoopunit/component/KnoxConfig.java @@ -24,7 +24,7 @@ public class KnoxConfig { public static final String KNOX_SERVICE_KEY = "knox.service"; //HDFS - public static final String HDFS_NAMENODE_HOST_KEY = "hdfs.namenode.host"; + public static final String HDFS_NAMENODE_HOST_CLIENT_KEY = "hdfs.namenode.client.host"; public static final String HDFS_NAMENODE_PORT_KEY = "hdfs.namenode.port"; public static final String HDFS_NAMENODE_HTTP_PORT_KEY = "hdfs.namenode.http.port"; @@ -34,7 +34,7 @@ public class KnoxConfig { // HBase Rest public static final String HBASE_REST_PORT_KEY = "hbase.rest.port"; - public static final String HBASE_REST_HOST_KEY="hbase.rest.host"; + public static final String HBASE_REST_HOST_CLIENT_KEY="hbase.rest.client.host"; // HBase Test public static final String HBASE_TEST_TABLE_NAME_KEY = "hbase.test.table.name"; @@ -44,7 +44,7 @@ public class KnoxConfig { //Oozie public static final String OOZIE_PORT = "oozie.port"; - public static final String OOZIE_HOST = "oozie.host"; + public static final String OOZIE_CLIENT_HOST = "oozie.client.host"; private KnoxConfig() {} } diff --git a/hadoop-unit-knox/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-knox/src/main/resources/hadoop-unit-default.properties index e2d85b9b..904bcf57 100644 --- a/hadoop-unit-knox/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-knox/src/main/resources/hadoop-unit-default.properties @@ -39,6 +39,11 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=localhost +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + # HBase 
hbase.master.port=25111 @@ -56,6 +61,9 @@ hbase.rest.host=0.0.0.0 hbase.rest.threads.max=100 hbase.rest.threads.min=2 +hbase.rest.client.host=127.0.0.1 + + # HBase Test hbase.test.table.name=hbase_test_table hbase.test.col.family.name=cf1 @@ -111,6 +119,8 @@ oozie.sharelib.name=oozie-4.2.0.2.3.2.0-2950-distro.tar.gz oozie.port=20113 oozie.host=localhost +oozie.client.host=localhost + # KNOX knox.host=localhost knox.port=8888 diff --git a/hadoop-unit-knox/src/test/java/fr/jetoile/hadoopunit/component/KnoxBootstrapTopologyTest.java b/hadoop-unit-knox/src/test/java/fr/jetoile/hadoopunit/component/KnoxBootstrapTopologyTest.java index c1d5ac26..7ee8f0ec 100644 --- a/hadoop-unit-knox/src/test/java/fr/jetoile/hadoopunit/component/KnoxBootstrapTopologyTest.java +++ b/hadoop-unit-knox/src/test/java/fr/jetoile/hadoopunit/component/KnoxBootstrapTopologyTest.java @@ -60,15 +60,15 @@ public void generatedTopology_should_be_ok_with_3_services() throws IOException " \n" + " \n" + " NAMENODE\n" + - " hdfs://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":20112\n" + + " hdfs://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":20112\n" + " \n" + " \n" + " WEBHDFS\n" + - " http://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_KEY) + ":50070/webhdfs\n" + + " http://" + configuration.getString(KnoxConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":50070/webhdfs\n" + " \n" + " \n" + " WEBHBASE\n" + - " http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_KEY) + ":28000\n" + + " http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_CLIENT_KEY) + ":28000\n" + " \n" + ""); } @@ -93,7 +93,7 @@ public void generatedTopology_should_be_ok_with_1_service() throws IOException { " \n" + " \n" + " WEBHBASE\n" + - " http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_KEY) + ":28000\n" + + " http://" + configuration.getString(KnoxConfig.HBASE_REST_HOST_CLIENT_KEY) + ":28000\n" + " \n" + ""); } diff --git 
a/hadoop-unit-maven-plugin/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-maven-plugin/src/main/resources/hadoop-unit-default.properties index e3ed9466..fdceaeb6 100644 --- a/hadoop-unit-maven-plugin/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-maven-plugin/src/main/resources/hadoop-unit-default.properties @@ -7,26 +7,55 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir # Hive Metastore -hive.metastore.hostname=localhost +hive.metastore.hostname=127.0.0.1 hive.metastore.port=20102 hive.metastore.derby.db.dir=/metastore_db +hive.metastore.client.hostname=127.0.0.1 + # Hive Server2 -hive.server2.hostname=localhost +hive.server2.hostname=127.0.0.1 hive.server2.port=20103 +hive.server2.client.hostname=127.0.0.1 + # Hive Test hive.test.database.name=default hive.test.table.name=test_table +# Hive3 +hive3.scratch.dir=/hive_scratch_dir +hive3.warehouse.dir=/tmp/warehouse_dir + +# Hive Metastore 3 +hive3.metastore.hostname=localhost +hive3.metastore.port=20102 +hive3.metastore.derby.db.dir=metastore_db + +hive3.metastore.client.hostname=localhost + +# Hive Server2 3 +hive3.server2.hostname=localhost +hive3.server2.port=20103 + +hive3.server2.client.hostname=localhost + +# Hive Test 3 +hive3.test.database.name=default +hive3.test.table.name=test_table + + # HDFS -hdfs.namenode.host=localhost +hdfs.namenode.host=127.0.0.1 hdfs.namenode.port=20112 hdfs.namenode.http.port=50070 hdfs.temp.dir=/embedded_hdfs @@ -34,11 +63,41 @@ hdfs.num.datanodes=1 hdfs.enable.permissions=false hdfs.format=true hdfs.enable.running.user.as.proxy.user=true +hdfs.datanode.address=127.0.0.1:50010 +hdfs.datanode.http.address=127.0.0.1:50075 +hdfs.datanode.ipc.address=127.0.0.1:50020 # HDFS Test hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=127.0.0.1 
+hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + + +# HDFS3 +hdfs3.namenode.host=127.0.0.1 +hdfs3.namenode.port=20112 +hdfs3.namenode.http.port=50070 +hdfs3.temp.dir=/embedded_hdfs +hdfs3.num.datanodes=1 +hdfs3.enable.permissions=false +hdfs3.format=true +hdfs3.enable.running.user.as.proxy.user=true +hdfs3.datanode.address=127.0.0.1:50010 +hdfs3.datanode.http.address=127.0.0.1:50075 +hdfs3.datanode.ipc.address=127.0.0.1:50020 + +# HDFS3 Test +hdfs3.test.file=/tmp/testing +hdfs3.test.string=TESTING + +hdfs3.namenode.client.host=127.0.0.1 +hdfs3.datanode.client.address=127.0.0.1:50010 +hdfs3.datanode.http.client.address=127.0.0.1:50075 +hdfs3.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 @@ -56,6 +115,8 @@ hbase.rest.host=0.0.0.0 hbase.rest.threads.max=100 hbase.rest.threads.min=2 +hbase.rest.client.host=127.0.0.1 + # HBase Test hbase.test.table.name=hbase_test_table hbase.test.col.family.name=cf1 @@ -83,19 +144,47 @@ solr.collection.name=collection1 solr.cloud.port=8983 + + + # YARN yarn.num.node.managers=1 yarn.num.local.dirs=1 yarn.num.log.dirs=1 -yarn.resource.manager.address=localhost:37001 -yarn.resource.manager.hostname=localhost -yarn.resource.manager.scheduler.address=localhost:37002 -yarn.resource.manager.resource.tracker.address=localhost:37003 -yarn.resource.manager.webapp.address=localhost:37004 +yarn.resource.manager.address=127.0.0.1:37001 +yarn.resource.manager.hostname=127.0.0.1 +yarn.resource.manager.scheduler.address=127.0.0.1:37002 +yarn.resource.manager.resource.tracker.address=127.0.0.1:37003 +yarn.resource.manager.webapp.address=127.0.0.1:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 
+yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + + +# YARN3 +yarn3.num.node.managers=1 +yarn3.num.local.dirs=1 +yarn3.num.log.dirs=1 +yarn3.resource.manager.address=localhost:37001 +yarn3.resource.manager.hostname=localhost +yarn3.resource.manager.scheduler.address=localhost:37002 +yarn3.resource.manager.resource.tracker.address=localhost:37003 +yarn3.resource.manager.webapp.address=localhost:37004 +yarn3.use.in.jvm.container.executor=false + +yarn3.resource.manager.client.address=localhost:37001 +yarn3.resource.manager.client.hostname=localhost +yarn3.resource.manager.scheduler.client.address=localhost:37002 +yarn3.resource.manager.resource.tracker.client.address=localhost:37003 +yarn3.resource.manager.webapp.client.address=localhost:37004 + # MR -mr.job.history.address=localhost:37005 +mr.job.history.address=127.0.0.1:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp @@ -107,20 +196,23 @@ oozie.hdfs.share.lib.dir=/tmp/share_lib oozie.share.lib.create=true oozie.local.share.lib.cache.dir=/tmp/share_lib_cache oozie.purge.local.share.lib.cache=false -oozie.sharelib.path=/home/khanh/github +oozie.sharelib.path=~/github oozie.sharelib.name=oozie-4.2.0.2.6.5.0-292-distro.tar.gz oozie.port=20113 -oozie.host=localhost -oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING +oozie.host=127.0.0.1 +oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING,SPARK #oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK +oozie.client.host=localhost + # ElasticSearch -elasticsearch.version=5.4.3 +elasticsearch.version=6.7.1 elasticsearch.ip=127.0.0.1 elasticsearch.http.port=14433 elasticsearch.tcp.port=14533 elasticsearch.index.name=test_index elasticsearch.cluster.name=elasticsearch +#elasticsearch.download.url=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.4.3.zip # MongoDB mongo.ip=127.0.0.1 @@ -139,31 
+231,31 @@ neo4j.port=13533 neo4j.temp.dir=/embedded_neo4j # KNOX -knox.host=localhost +knox.host=127.0.0.1 knox.port=8888 knox.path=gateway knox.cluster=mycluster knox.home.dir=/embedded_knox knox.service=namenode,webhdfs,webhbase +#knox.service=namenode,webhdfs,webhbase,oozie # Alluxio #alluxio.work.dir=/tmp/alluxio -alluxio.work.dir=hdfs://localhost:20112/alluxio -alluxio.hostname=localhost +alluxio.work.dir=hdfs://127.0.0.1:20112/alluxio +alluxio.hostname=127.0.0.1 alluxio.master.port=14001 alluxio.master.web.port=14002 alluxio.proxy.web.port=14100 alluxio.worker.web.port=14003 alluxio.worker.data.port=14004 alluxio.worker.port=14005 -alluxio.webapp.directory=src/test/resources/alluxio/webapp - +alluxio.webapp.directory=conf/alluxio/webapp # Redis redis.port=6379 redis.download.url=http://download.redis.io/releases/ -redis.version=4.0.0 +redis.version=5.0.4 redis.cleanup.installation=false redis.temp.dir=/redis redis.type=SERVER @@ -171,4 +263,60 @@ redis.type=SERVER #redis.type=MASTER_SLAVE #redis.type=SENTINEL #redis.slave.ports=6380 -#redis.sentinel.ports=36479,36480,36481,36482,36483 \ No newline at end of file +#redis.sentinel.ports=36479,36480,36481,36482,36483 + + + + +# Confluent +confluent.schemaregistry.port=8081 +confluent.schemaregistry.host=127.0.0.1 +confluent.schemaregistry.kafkastore.topic=_schema +confluent.schemaregistry.debug=false + +confluent.kafka.log.dirs=/kafka-logs +confluent.kafka.broker.id=0 +confluent.kafka.port=22222 +confluent.kafka.host=127.0.0.1 + +confluent.rest.host=127.0.0.1 +confluent.rest.port=8082 + +confluent.ksql.host=127.0.0.1 +confluent.ksql.port=8083 + +confluent.schemaregistry.client.host=127.0.0.1 +confluent.kafka.client.host=127.0.0.1 +confluent.rest.client.host=127.0.0.1 +confluent.ksql.client.host=127.0.0.1 + +# Docker +docker.imagename=alpine:3.2 +docker.exposedports=80 +docker.envs=MAGIC_NUMBER:42 +docker.labels=MAGIC_NUMBER:42 +docker.command=/bin/sh, -c, while true; do echo \"$MAGIC_NUMBER\" | nc -l -p 80; 
done +docker.fixed.exposedports=21300:80 +#docker.classpath.resources.mapping=hadoop-unit-default.properties:/hadoop-unit-default.properties:READ_ONLY + +# Docker compose +dockercompose.filename=conf/docker-compose.yml +#dockercompose.exposedports=zoo:2181,resourcemanager:8088 +dockercompose.local=false + + + +# Pulsar +pulsar.zookeeper.temp.dir=/pulsar/embedded_zk +pulsar.zookeeper.port=22020 +pulsar.ip=127.0.0.1 +pulsar.port=22022 +pulsar.temp.dir=/pulsar + +# BookKeeper +bookkeeper.ip=127.0.0.1 +bookkeeper.port=31810 +bookkeeper.http.port=31900 +bookkeeper.temp.dir=/bookeeper + +bookkeeper.client.ip=127.0.0.1 diff --git a/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieBootstrap.java b/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieBootstrap.java index 9194a63f..ee3af38f 100644 --- a/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieBootstrap.java +++ b/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieBootstrap.java @@ -46,7 +46,6 @@ public class OozieBootstrap implements BootstrapHadoop { private static final String SHARE_LIB_PREFIX = "lib_"; private OozieLocalServer oozieLocalCluster; - private MRLocalCluster mrLocalCluster; private State state = State.STOPPED; @@ -57,20 +56,12 @@ public class OozieBootstrap implements BootstrapHadoop { private String oozieUsername; private String oozieGroupname; private String oozieYarnResourceManagerAddress; - private org.apache.hadoop.conf.Configuration hadoopConf; private String hdfsDefaultFs; private String oozieHdfsShareLibDir; private boolean oozieShareLibCreate; private String oozieLocalShareLibCacheDir; private boolean ooziePurgeLocalShareLibCache; - private int numNodeManagers; - private String jobHistoryAddress; - private String resourceManagerAddress; - private String resourceManagerHostname; - private String resourceManagerSchedulerAddress; - private String resourceManagerResourceTrackerAddress; private String 
resourceManagerWebappAddress; - private boolean useInJvmContainerExecutor; private String oozieShareLibPath; private String oozieShareLibName; private int ooziePort; @@ -117,6 +108,7 @@ private void init() { } private void build() throws NotFoundServiceException { + hdfsDefaultFs = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_CLIENT_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration(); hadoopConf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*"); @@ -124,22 +116,7 @@ private void build() throws NotFoundServiceException { hadoopConf.set("oozie.service.WorkflowAppService.system.libpath", hdfsDefaultFs + "/" + oozieHdfsShareLibDir); hadoopConf.set("oozie.use.system.libpath", "true"); - hadoopConf.set("fs.defaultFS", "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); - hdfsDefaultFs = "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getString(HdfsConfig.HDFS_NAMENODE_PORT_KEY); - - mrLocalCluster = new MRLocalCluster.Builder() - .setNumNodeManagers(numNodeManagers) - .setJobHistoryAddress(jobHistoryAddress) - .setResourceManagerAddress(resourceManagerAddress) - .setResourceManagerHostname(resourceManagerHostname) - .setResourceManagerSchedulerAddress(resourceManagerSchedulerAddress) - .setResourceManagerResourceTrackerAddress(resourceManagerResourceTrackerAddress) - .setResourceManagerWebappAddress(resourceManagerWebappAddress) - .setUseInJvmContainerExecutor(useInJvmContainerExecutor) - .setHdfsDefaultFs(hdfsDefaultFs) - .setConfig(hadoopConf) - .build(); - + hadoopConf.set("fs.defaultFS", hdfsDefaultFs); oozieLocalCluster = new OozieLocalServer.Builder() .setOozieTestDir(oozieTestDir) @@ -167,7 +144,7 @@ private void loadConfig() throws BootstrapException, NotFoundServiceException 
{ oozieHomeDir = configuration.getString(OozieConfig.OOZIE_HOME_DIR_KEY); oozieUsername = System.getProperty("user.name"); oozieGroupname = configuration.getString(OozieConfig.OOZIE_GROUPNAME_KEY); - oozieYarnResourceManagerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY); + oozieYarnResourceManagerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY); oozieHdfsShareLibDir = configuration.getString(OozieConfig.OOZIE_HDFS_SHARE_LIB_DIR_KEY); oozieShareLibCreate = configuration.getBoolean(OozieConfig.OOZIE_SHARE_LIB_CREATE_KEY); @@ -176,14 +153,7 @@ private void loadConfig() throws BootstrapException, NotFoundServiceException { oozieTmpDir = getTmpDirPath(configuration, OozieConfig.OOZIE_TMP_DIR_KEY); - numNodeManagers = Integer.parseInt(configuration.getString(YarnConfig.YARN_NUM_NODE_MANAGERS_KEY)); - jobHistoryAddress = configuration.getString(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY); - resourceManagerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY); - resourceManagerHostname = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY); - resourceManagerSchedulerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY); - resourceManagerResourceTrackerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); - resourceManagerWebappAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY); - useInJvmContainerExecutor = configuration.getBoolean(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY); + resourceManagerWebappAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_CLIENT_KEY); ooziePort = configuration.getInt(OozieConfig.OOZIE_PORT); oozieHost = configuration.getString(OozieConfig.OOZIE_HOST); @@ -206,8 +176,8 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(OozieConfig.OOZIE_GROUPNAME_KEY))) { 
oozieGroupname = configs.get(OozieConfig.OOZIE_GROUPNAME_KEY); } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY))) { - oozieYarnResourceManagerAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY); + if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY))) { + oozieYarnResourceManagerAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY); } if (StringUtils.isNotEmpty(configs.get(OozieConfig.OOZIE_HDFS_SHARE_LIB_DIR_KEY))) { oozieHdfsShareLibDir = configs.get(OozieConfig.OOZIE_HDFS_SHARE_LIB_DIR_KEY); @@ -226,29 +196,8 @@ public void loadConfig(Map configs) { oozieTmpDir = getTmpDirPath(configs, OozieConfig.OOZIE_TMP_DIR_KEY); } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_NUM_NODE_MANAGERS_KEY))) { - numNodeManagers = Integer.parseInt(configs.get(YarnConfig.YARN_NUM_NODE_MANAGERS_KEY)); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY))) { - jobHistoryAddress = configs.get(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY))) { - resourceManagerAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY))) { - resourceManagerHostname = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY))) { - resourceManagerSchedulerAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY))) { - resourceManagerResourceTrackerAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY))) { - 
resourceManagerWebappAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY); - } - if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY))) { - useInJvmContainerExecutor = Boolean.parseBoolean(configs.get(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY)); + if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_CLIENT_KEY))) { + resourceManagerWebappAddress = configs.get(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_CLIENT_KEY); } if (StringUtils.isNotEmpty(configs.get(OozieConfig.OOZIE_PORT))) { @@ -282,7 +231,6 @@ public Bootstrap start() { LOGGER.error("unable to add oozie", e); } try { - mrLocalCluster.start(); oozieLocalCluster.start(); } catch (Exception e) { LOGGER.error("unable to add oozie", e); @@ -301,7 +249,6 @@ public Bootstrap stop() { LOGGER.info("{} is stopping", this.getClass().getName()); try { oozieLocalCluster.stop(true); - mrLocalCluster.stop(true); cleanup(); } catch (Exception e) { LOGGER.error("unable to stop oozie", e); @@ -352,8 +299,12 @@ public void createShareLib() { FileSystem hdfsFileSystem = null; org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration(); - conf.set("fs.default.name", "hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getInt(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); - URI uri = URI.create("hdfs://" + configuration.getString(HdfsConfig.HDFS_NAMENODE_HOST_KEY) + ":" + configuration.getInt(HdfsConfig.HDFS_NAMENODE_PORT_KEY)); + conf.set("fs.default.name", hdfsDefaultFs); + conf.set("oozie.service.WorkflowAppService.system.libpath", hdfsDefaultFs + "/" + oozieHdfsShareLibDir); + conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*"); + conf.set("hadoop.proxyuser." 
+ System.getProperty("user.name") + ".groups", "*"); + + URI uri = URI.create(hdfsDefaultFs); try { hdfsFileSystem = FileSystem.get(uri, conf); } catch (IOException e) { diff --git a/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieConfig.java b/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieConfig.java index b323acf0..7cfbe9b0 100644 --- a/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieConfig.java +++ b/hadoop-unit-oozie/src/main/java/fr/jetoile/hadoopunit/component/OozieConfig.java @@ -33,5 +33,7 @@ public class OozieConfig { public static final String SHARE_LIB_PREFIX = "lib_"; public static final String OOZIE_SHARE_LIB_COMPONENT_KEY = "oozie.sharelib.component"; + public static final String OOZIE_CLIENT_HOST = "oozie.client.host"; + private OozieConfig() {} } diff --git a/hadoop-unit-oozie/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-oozie/src/main/resources/hadoop-unit-default.properties index 4f3ef202..0036ed77 100644 --- a/hadoop-unit-oozie/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-oozie/src/main/resources/hadoop-unit-default.properties @@ -39,6 +39,10 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=127.0.0.1 +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 @@ -79,15 +83,22 @@ solr.cloud.port=8983 yarn.num.node.managers=1 yarn.num.local.dirs=1 yarn.num.log.dirs=1 -yarn.resource.manager.address=localhost:37001 -yarn.resource.manager.hostname=localhost -yarn.resource.manager.scheduler.address=localhost:37002 -yarn.resource.manager.resource.tracker.address=localhost:37003 -yarn.resource.manager.webapp.address=localhost:37004 +yarn.resource.manager.address=0.0.0.0:37001 +yarn.resource.manager.hostname=0.0.0.0 
+yarn.resource.manager.scheduler.address=0.0.0.0:37002 +yarn.resource.manager.resource.tracker.address=0.0.0.0:37003 +yarn.resource.manager.webapp.address=0.0.0.0:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + # MR -mr.job.history.address=localhost:37005 +mr.job.history.address=0.0.0.0:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp @@ -106,6 +117,8 @@ oozie.host=localhost oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING,SPARK #oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK +oozie.client.host=localhost + # MongoDB mongo.ip=127.0.0.1 mongo.port=13333 diff --git a/hadoop-unit-oozie/src/test/java/fr/jetoile/hadoopunit/component/OozieBootstrapTest.java b/hadoop-unit-oozie/src/test/java/fr/jetoile/hadoopunit/component/OozieBootstrapTest.java index 5c111ff3..933349fe 100644 --- a/hadoop-unit-oozie/src/test/java/fr/jetoile/hadoopunit/component/OozieBootstrapTest.java +++ b/hadoop-unit-oozie/src/test/java/fr/jetoile/hadoopunit/component/OozieBootstrapTest.java @@ -141,6 +141,7 @@ public void oozieShouldStart() throws Exception { oozieConf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName()); oozieConf.setProperty("jobTracker", "localhost:37001"); oozieConf.setProperty("nameNode", "hdfs://localhost:20112"); + oozieConf.setProperty("user.name", System.getProperty("user.name")); oozieConf.setProperty("doOption", "true"); //submit and check diff --git a/hadoop-unit-oozie/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-oozie/src/test/resources/hadoop-unit-default.properties index 03e44da1..0d2f8b9c 100644 --- 
a/hadoop-unit-oozie/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-oozie/src/test/resources/hadoop-unit-default.properties @@ -38,6 +38,10 @@ hdfs.enable.running.user.as.proxy.user=true hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=127.0.0.1 +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 @@ -78,15 +82,22 @@ solr.cloud.port=8983 yarn.num.node.managers=1 yarn.num.local.dirs=1 yarn.num.log.dirs=1 -yarn.resource.manager.address=localhost:37001 -yarn.resource.manager.hostname=localhost -yarn.resource.manager.scheduler.address=localhost:37002 -yarn.resource.manager.resource.tracker.address=localhost:37003 -yarn.resource.manager.webapp.address=localhost:37004 +yarn.resource.manager.address=0.0.0.0:37001 +yarn.resource.manager.hostname=0.0.0.0 +yarn.resource.manager.scheduler.address=0.0.0.0:37002 +yarn.resource.manager.resource.tracker.address=0.0.0.0:37003 +yarn.resource.manager.webapp.address=0.0.0.0:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + # MR -mr.job.history.address=localhost:37005 +mr.job.history.address=0.0.0.0:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp @@ -99,12 +110,32 @@ oozie.share.lib.create=true oozie.local.share.lib.cache.dir=/tmp/share_lib_cache oozie.purge.local.share.lib.cache=false oozie.sharelib.path=/home/khanh/github -oozie.sharelib.name=oozie-4.2.0.2.6.1.0-129-distro.tar.gz +oozie.sharelib.name=oozie-4.2.0.2.6.5.0-292-distro.tar.gz oozie.port=20113 oozie.host=localhost oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING 
#oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK +oozie.client.host=localhost + +# MongoDB +mongo.ip=127.0.0.1 +mongo.port=13333 +mongo.database.name=test_database +mongo.collection.name=test_collection + +# Cassandra +cassandra.ip=127.0.0.1 +cassandra.port=13433 +cassandra.temp.dir=/embedded_cassandra + +# ElasticSearch +elasticsearch.version=5.4.3 +elasticsearch.ip=127.0.0.1 +elasticsearch.http.port=14433 +elasticsearch.tcp.port=14533 +elasticsearch.index.name=test_index +elasticsearch.cluster.name=elasticsearch # Neo4j neo4j.ip=127.0.0.1 diff --git a/hadoop-unit-solrcloud/src/main/java/fr/jetoile/hadoopunit/component/SolrCloudBootstrap.java b/hadoop-unit-solrcloud/src/main/java/fr/jetoile/hadoopunit/component/SolrCloudBootstrap.java index b730d7dd..3b9cf7d1 100644 --- a/hadoop-unit-solrcloud/src/main/java/fr/jetoile/hadoopunit/component/SolrCloudBootstrap.java +++ b/hadoop-unit-solrcloud/src/main/java/fr/jetoile/hadoopunit/component/SolrCloudBootstrap.java @@ -82,7 +82,7 @@ public ComponentMetadata getMetadata() { @Override public String getProperties() { - return "\n \t\t\t zh:" + zkHostString + + return "\n \t\t\t zk:" + zkHostString + "\n \t\t\t port:" + solrPort + "\n \t\t\t collection:" + solrCollectionName; } @@ -115,7 +115,7 @@ private void loadConfig() throws BootstrapException { solrDirectory = configuration.getString(SolrCloudConfig.SOLR_DIR_KEY); solrCollectionName = configuration.getString(SolrCloudConfig.SOLR_COLLECTION_NAME); solrPort = configuration.getInt(SolrCloudConfig.SOLR_PORT); - zkHostString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + zkHostString = configuration.getString(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configuration.getInt(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } @Override @@ -129,8 +129,8 @@ public void loadConfig(Map configs) { if 
(StringUtils.isNotEmpty(configs.get(SolrCloudConfig.SOLR_PORT))) { solrPort = Integer.parseInt(configs.get(SolrCloudConfig.SOLR_PORT)); } - if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { - zkHostString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); + if (StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY)) && StringUtils.isNotEmpty(configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY))) { + zkHostString = configs.get(ZookeeperConfig.ZOOKEEPER_HOST_CLIENT_KEY) + ":" + configs.get(ZookeeperConfig.ZOOKEEPER_PORT_KEY); } } diff --git a/hadoop-unit-solrcloud/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-solrcloud/src/main/resources/hadoop-unit-default.properties index 1b328f75..628f59cf 100644 --- a/hadoop-unit-solrcloud/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-solrcloud/src/main/resources/hadoop-unit-default.properties @@ -5,6 +5,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir diff --git a/hadoop-unit-solrcloud/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-solrcloud/src/test/resources/hadoop-unit-default.properties index 1b328f75..628f59cf 100644 --- a/hadoop-unit-solrcloud/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-solrcloud/src/test/resources/hadoop-unit-default.properties @@ -5,6 +5,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir diff --git a/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties index cf3e137a..2c4b2b00 100644 --- 
a/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties @@ -121,26 +121,33 @@ bookkeeper.metadataClass=fr.jetoile.hadoopunit.component.BookkeeperMetadata maven.central.repo=https://repo.maven.apache.org/maven2/ maven.local.repo=~/.m2/repository -maven.debug=true +#maven.debug=true # Zookeeper zookeeper.temp.dir=/embedded_zk -zookeeper.host=127.0.0.1 +zookeeper.host=0.0.0.0 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 + + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir # Hive Metastore -hive.metastore.hostname=127.0.0.1 +hive.metastore.hostname=0.0.0.0 hive.metastore.port=20102 hive.metastore.derby.db.dir=/metastore_db +hive.metastore.client.hostname=127.0.0.1 + # Hive Server2 -hive.server2.hostname=127.0.0.1 +hive.server2.hostname=0.0.0.0 hive.server2.port=20103 +hive.server2.client.hostname=127.0.0.1 + # Hive Test hive.test.database.name=default hive.test.table.name=test_table @@ -151,21 +158,25 @@ hive3.scratch.dir=/hive_scratch_dir hive3.warehouse.dir=/tmp/warehouse_dir # Hive Metastore 3 -hive3.metastore.hostname=localhost +hive3.metastore.hostname=0.0.0.0 hive3.metastore.port=20102 hive3.metastore.derby.db.dir=metastore_db +hive3.metastore.client.hostname=localhost + # Hive Server2 3 -hive3.server2.hostname=localhost +hive3.server2.hostname=0.0.0.0 hive3.server2.port=20103 +hive3.server2.client.hostname=localhost + # Hive Test 3 hive3.test.database.name=default hive3.test.table.name=test_table # HDFS -hdfs.namenode.host=127.0.0.1 +hdfs.namenode.host=0.0.0.0 hdfs.namenode.port=20112 hdfs.namenode.http.port=50070 hdfs.temp.dir=/embedded_hdfs @@ -173,17 +184,22 @@ hdfs.num.datanodes=1 hdfs.enable.permissions=false hdfs.format=true hdfs.enable.running.user.as.proxy.user=true -hdfs.datanode.address=127.0.0.1:50010 -hdfs.datanode.http.address=127.0.0.1:50075 -hdfs.datanode.ipc.address=127.0.0.1:50020 
+hdfs.datanode.address=0.0.0.0:50010 +hdfs.datanode.http.address=0.0.0.0:50075 +hdfs.datanode.ipc.address=0.0.0.0:50020 # HDFS Test hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=127.0.0.1 +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + # HDFS3 -hdfs3.namenode.host=127.0.0.1 +hdfs3.namenode.host=0.0.0.0 hdfs3.namenode.port=20112 hdfs3.namenode.http.port=50070 hdfs3.temp.dir=/embedded_hdfs @@ -191,15 +207,18 @@ hdfs3.num.datanodes=1 hdfs3.enable.permissions=false hdfs3.format=true hdfs3.enable.running.user.as.proxy.user=true -hdfs3.datanode.address=127.0.0.1:50010 -hdfs3.datanode.http.address=127.0.0.1:50075 -hdfs3.datanode.ipc.address=127.0.0.1:50020 +hdfs3.datanode.address=0.0.0.0:50010 +hdfs3.datanode.http.address=0.0.0.0:50075 +hdfs3.datanode.ipc.address=0.0.0.0:50020 # HDFS3 Test hdfs3.test.file=/tmp/testing hdfs3.test.string=TESTING - +hdfs3.namenode.client.host=127.0.0.1 +hdfs3.datanode.client.address=127.0.0.1:50010 +hdfs3.datanode.http.client.address=127.0.0.1:50075 +hdfs3.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 @@ -217,6 +236,8 @@ hbase.rest.host=0.0.0.0 hbase.rest.threads.max=100 hbase.rest.threads.min=2 +hbase.rest.client.host=127.0.0.1 + # HBase Test hbase.test.table.name=hbase_test_table hbase.test.col.family.name=cf1 @@ -224,7 +245,7 @@ hbase.test.col.qualifier.name=cq1 hbase.test.num.rows.to.put=50 # Kafka -kafka.hostname=127.0.0.1 +kafka.hostname=0.0.0.0 kafka.port=20111 # Kafka Test @@ -251,28 +272,40 @@ solr.cloud.port=8983 yarn.num.node.managers=1 yarn.num.local.dirs=1 yarn.num.log.dirs=1 -yarn.resource.manager.address=127.0.0.1:37001 -yarn.resource.manager.hostname=127.0.0.1 -yarn.resource.manager.scheduler.address=127.0.0.1:37002 -yarn.resource.manager.resource.tracker.address=127.0.0.1:37003 -yarn.resource.manager.webapp.address=127.0.0.1:37004 
+yarn.resource.manager.address=0.0.0.0:37001 +yarn.resource.manager.hostname=0.0.0.0 +yarn.resource.manager.scheduler.address=0.0.0.0:37002 +yarn.resource.manager.resource.tracker.address=0.0.0.0:37003 +yarn.resource.manager.webapp.address=0.0.0.0:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + # YARN3 yarn3.num.node.managers=1 yarn3.num.local.dirs=1 yarn3.num.log.dirs=1 -yarn3.resource.manager.address=localhost:37001 -yarn3.resource.manager.hostname=localhost -yarn3.resource.manager.scheduler.address=localhost:37002 -yarn3.resource.manager.resource.tracker.address=localhost:37003 -yarn3.resource.manager.webapp.address=localhost:37004 +yarn3.resource.manager.address=0.0.0.0:37001 +yarn3.resource.manager.hostname=0.0.0.0 +yarn3.resource.manager.scheduler.address=0.0.0.0:37002 +yarn3.resource.manager.resource.tracker.address=0.0.0.0:37003 +yarn3.resource.manager.webapp.address=0.0.0.0:37004 yarn3.use.in.jvm.container.executor=false +yarn3.resource.manager.client.address=localhost:37001 +yarn3.resource.manager.client.hostname=localhost +yarn3.resource.manager.scheduler.client.address=localhost:37002 +yarn3.resource.manager.resource.tracker.client.address=localhost:37003 +yarn3.resource.manager.webapp.client.address=localhost:37004 # MR -mr.job.history.address=127.0.0.1:37005 +mr.job.history.address=0.0.0.0:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp @@ -287,10 +320,12 @@ oozie.purge.local.share.lib.cache=false oozie.sharelib.path=~/github oozie.sharelib.name=oozie-4.2.0.2.6.5.0-292-distro.tar.gz oozie.port=20113 -oozie.host=127.0.0.1 +oozie.host=0.0.0.0 oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING,SPARK 
#oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK +oozie.client.host=localhost + # ElasticSearch elasticsearch.version=6.7.1 elasticsearch.ip=127.0.0.1 @@ -301,23 +336,28 @@ elasticsearch.cluster.name=elasticsearch #elasticsearch.download.url=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.4.3.zip # MongoDB -mongo.ip=127.0.0.1 +mongo.ip=0.0.0.0 mongo.port=13333 mongo.database.name=test_database mongo.collection.name=test_collection # Cassandra -cassandra.ip=127.0.0.1 +cassandra.listen.address.ip=127.0.0.1 +cassandra.rpc.address.ip=0.0.0.0 +cassandra.broadcast.address.ip=127.0.0.1 +cassandra.broadcast.rpc.address.ip=127.0.0.1 cassandra.port=13433 cassandra.temp.dir=/embedded_cassandra +cassandra.listen.address.client.ip=127.0.0.1 + # Neo4j -neo4j.ip=127.0.0.1 +neo4j.ip=0.0.0.0 neo4j.port=13533 neo4j.temp.dir=/embedded_neo4j # KNOX -knox.host=127.0.0.1 +knox.host=0.0.0.0 knox.port=8888 knox.path=gateway knox.cluster=mycluster @@ -356,22 +396,25 @@ redis.type=SERVER # Confluent confluent.schemaregistry.port=8081 -confluent.schemaregistry.host=127.0.0.1 +confluent.schemaregistry.host=0.0.0.0 confluent.schemaregistry.kafkastore.topic=_schema confluent.schemaregistry.debug=false confluent.kafka.log.dirs=/kafka-logs confluent.kafka.broker.id=0 confluent.kafka.port=22222 -confluent.kafka.host=127.0.0.1 +confluent.kafka.host=0.0.0.0 -confluent.rest.host=127.0.0.1 +confluent.rest.host=0.0.0.0 confluent.rest.port=8082 -confluent.ksql.host=127.0.0.1 +confluent.ksql.host=0.0.0.0 confluent.ksql.port=8083 - +confluent.schemaregistry.client.host=127.0.0.1 +confluent.kafka.client.host=127.0.0.1 +confluent.rest.client.host=127.0.0.1 +confluent.ksql.client.host=127.0.0.1 # Docker docker.imagename=alpine:3.2 @@ -392,12 +435,14 @@ dockercompose.local=false # Pulsar pulsar.zookeeper.temp.dir=/pulsar/embedded_zk pulsar.zookeeper.port=22020 -pulsar.ip=127.0.0.1 +pulsar.ip=0.0.0.0 pulsar.port=22022 pulsar.temp.dir=/pulsar # 
BookKeeper -bookkeeper.ip=127.0.0.1 +bookkeeper.ip=0.0.0.0 bookkeeper.port=31810 bookkeeper.http.port=31900 -bookkeeper.temp.dir=/bookeeper \ No newline at end of file +bookkeeper.temp.dir=/bookeeper + +bookkeeper.client.ip=127.0.0.1 diff --git a/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java index 3f7600ee..7d822674 100644 --- a/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java +++ b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java @@ -22,6 +22,7 @@ import fr.jetoile.hadoopunit.exception.NotFoundServiceException; import org.apache.commons.configuration.Configuration; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +48,7 @@ public class YarnBootstrap implements BootstrapHadoop { private String yarnRMResourceTrackerAddress; private String yarnRMWebappAddress; private boolean inJvmContainer; + private String jobHistoryAddress; public YarnBootstrap() { if (yarnLocalCluster == null) { @@ -85,10 +87,17 @@ public String getProperties() { "\n \t\t\t RM Scheduler address:" + yarnRMSchedulerAddress + "\n \t\t\t RM Resource Tracker address:" + yarnRMResourceTrackerAddress + "\n \t\t\t RM Webapp address:" + yarnRMWebappAddress + + "\n \t\t\t jobHistoryAddress" + jobHistoryAddress + "\n \t\t\t InJvmContainer:" + inJvmContainer; } private void build() throws NotFoundServiceException { + org.apache.hadoop.conf.Configuration configuration = new org.apache.hadoop.conf.Configuration(); + configuration.set(JHAdminConfig.MR_HISTORY_ADDRESS, jobHistoryAddress); + configuration.set(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, "true"); + configuration.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*"); + configuration.set("hadoop.proxyuser." 
+ System.getProperty("user.name") + ".groups", "*"); + yarnLocalCluster = new YarnLocalCluster.Builder() .setNumNodeManagers(yarnNumNodeManagers) .setNumLocalDirs(yarnNumLocalDirs) @@ -99,7 +108,7 @@ private void build() throws NotFoundServiceException { .setResourceManagerResourceTrackerAddress(yarnRMResourceTrackerAddress) .setResourceManagerWebappAddress(yarnRMWebappAddress) .setUseInJvmContainerExecutor(inJvmContainer) - .setConfig(new org.apache.hadoop.conf.Configuration()) + .setConfig(configuration) .build(); } @@ -114,6 +123,8 @@ private void loadConfig() throws BootstrapException, NotFoundServiceException { yarnRMResourceTrackerAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); yarnRMWebappAddress = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY); inJvmContainer = configuration.getBoolean(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY); + + jobHistoryAddress = configuration.getString(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY); } @Override @@ -145,6 +156,9 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY))) { inJvmContainer = Boolean.parseBoolean(configs.get(YarnConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY)); } + if (StringUtils.isNotEmpty(configs.get(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY))) { + jobHistoryAddress = configs.get(YarnConfig.MR_JOB_HISTORY_ADDRESS_KEY); + } } @Override diff --git a/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnConfig.java b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnConfig.java index c182f783..81cb1f84 100644 --- a/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnConfig.java +++ b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnConfig.java @@ -29,5 +29,12 @@ public class YarnConfig { // MR public static final String MR_JOB_HISTORY_ADDRESS_KEY = "mr.job.history.address"; + public static final 
String YARN_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY = "yarn.resource.manager.client.address"; + public static final String YARN_RESOURCE_MANAGER_HOSTNAME_CLIENT_KEY = "yarn.resource.manager.client.hostname"; + public static final String YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_CLIENT_KEY = "yarn.resource.manager.scheduler.client.address"; + public static final String YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_CLIENT_KEY = "yarn.resource.manager.webapp.client.address"; + public static final String YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_CLIENT_KEY = "yarn.resource.manager.resource.tracker.client.address"; + public static final String MR_JOB_HISTORY_ADDRESS_CLIENT_KEY = "mr.job.history.client.address"; + private YarnConfig() {} } diff --git a/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties index 29fd49f9..a71aff61 100644 --- a/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties @@ -97,9 +97,17 @@ yarn.resource.manager.resource.tracker.address=localhost:37003 yarn.resource.manager.webapp.address=localhost:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + # MR mr.job.history.address=localhost:37005 +mr.job.history.client.address=localhost:37005 + # Oozie oozie.tmp.dir=/oozie_tmp oozie.test.dir=/embedded_oozie diff --git a/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java index b1c3e793..3d45a0a3 100644 --- 
a/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java +++ b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java @@ -47,10 +47,10 @@ public void testYarnLocalClusterIntegrationTest() { args[0] = "whoami"; args[1] = "1"; args[2] = getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString(); - args[3] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY); - args[4] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY); - args[5] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY); - args[6] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); + args[3] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY); + args[4] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_HOSTNAME_CLIENT_KEY); + args[5] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_CLIENT_KEY); + args[6] = configuration.getString(YarnConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_CLIENT_KEY); try { diff --git a/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties index 29fd49f9..a71aff61 100644 --- a/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties @@ -97,9 +97,17 @@ yarn.resource.manager.resource.tracker.address=localhost:37003 yarn.resource.manager.webapp.address=localhost:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + # MR mr.job.history.address=localhost:37005 
+mr.job.history.client.address=localhost:37005 + # Oozie oozie.tmp.dir=/oozie_tmp oozie.test.dir=/embedded_oozie diff --git a/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Bootstrap.java b/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Bootstrap.java index c0c91728..3d349a3d 100644 --- a/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Bootstrap.java +++ b/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Bootstrap.java @@ -22,6 +22,7 @@ import fr.jetoile.hadoopunit.exception.NotFoundServiceException; import org.apache.commons.configuration.Configuration; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,6 +48,7 @@ public class Yarn3Bootstrap implements BootstrapHadoop3 { private String yarnRMResourceTrackerAddress; private String yarnRMWebappAddress; private boolean inJvmContainer; + private String jobHistoryAddress; public Yarn3Bootstrap() { if (yarnLocalCluster == null) { @@ -85,10 +87,18 @@ public String getProperties() { "\n \t\t\t RM Scheduler address:" + yarnRMSchedulerAddress + "\n \t\t\t RM Resource Tracker address:" + yarnRMResourceTrackerAddress + "\n \t\t\t RM Webapp address:" + yarnRMWebappAddress + + "\n \t\t\t jobHistoryAddress" + jobHistoryAddress + "\n \t\t\t InJvmContainer:" + inJvmContainer; } private void build() throws NotFoundServiceException { + org.apache.hadoop.conf.Configuration configuration = new org.apache.hadoop.conf.Configuration(); + + configuration.set(JHAdminConfig.MR_HISTORY_ADDRESS, jobHistoryAddress); + configuration.set(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, "true"); + configuration.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*"); + configuration.set("hadoop.proxyuser." 
+ System.getProperty("user.name") + ".groups", "*"); + yarnLocalCluster = new YarnLocalCluster.Builder() .setNumNodeManagers(yarnNumNodeManagers) .setNumLocalDirs(yarnNumLocalDirs) @@ -99,7 +109,7 @@ private void build() throws NotFoundServiceException { .setResourceManagerResourceTrackerAddress(yarnRMResourceTrackerAddress) .setResourceManagerWebappAddress(yarnRMWebappAddress) .setUseInJvmContainerExecutor(inJvmContainer) - .setConfig(new org.apache.hadoop.conf.Configuration()) + .setConfig(configuration) .build(); } @@ -114,6 +124,8 @@ private void loadConfig() throws BootstrapException, NotFoundServiceException { yarnRMResourceTrackerAddress = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); yarnRMWebappAddress = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY); inJvmContainer = configuration.getBoolean(Yarn3Config.YARN3_USE_IN_JVM_CONTAINER_EXECUTOR_KEY); + + jobHistoryAddress = configuration.getString(Yarn3Config.MR_JOB_HISTORY_ADDRESS_KEY); } @Override @@ -145,6 +157,9 @@ public void loadConfig(Map configs) { if (StringUtils.isNotEmpty(configs.get(Yarn3Config.YARN3_USE_IN_JVM_CONTAINER_EXECUTOR_KEY))) { inJvmContainer = Boolean.parseBoolean(configs.get(Yarn3Config.YARN3_USE_IN_JVM_CONTAINER_EXECUTOR_KEY)); } + if (StringUtils.isNotEmpty(configs.get(Yarn3Config.MR_JOB_HISTORY_ADDRESS_KEY))) { + jobHistoryAddress = configs.get(Yarn3Config.MR_JOB_HISTORY_ADDRESS_KEY); + } } @Override diff --git a/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Config.java b/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Config.java index 3acf86ab..0cafb1b2 100644 --- a/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Config.java +++ b/hadoop-unit-yarn3/src/main/java/fr/jetoile/hadoopunit/component/Yarn3Config.java @@ -29,5 +29,12 @@ public class Yarn3Config { // MR public static final String MR_JOB_HISTORY_ADDRESS_KEY = "mr.job.history.address"; + 
public static final String YARN3_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY = "yarn3.resource.manager.client.address"; + public static final String YARN3_RESOURCE_MANAGER_HOSTNAME_CLIENT_KEY = "yarn3.resource.manager.client.hostname"; + public static final String YARN3_RESOURCE_MANAGER_SCHEDULER_ADDRESS_CLIENT_KEY = "yarn3.resource.manager.scheduler.client.address"; + public static final String YARN3_RESOURCE_MANAGER_WEBAPP_ADDRESS_CLIENT_KEY = "yarn3.resource.manager.webapp.client.address"; + public static final String YARN3_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_CLIENT_KEY = "yarn3.resource.manager.resource.tracker.client.address"; + public static final String MR_JOB_HISTORY_ADDRESS_CLIENT_KEY = "mr.job.history.client.address"; + private Yarn3Config() {} } diff --git a/hadoop-unit-yarn3/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-yarn3/src/main/resources/hadoop-unit-default.properties index 4cbc67d4..9be6c2c3 100644 --- a/hadoop-unit-yarn3/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-yarn3/src/main/resources/hadoop-unit-default.properties @@ -97,6 +97,11 @@ yarn.resource.manager.resource.tracker.address=localhost:37003 yarn.resource.manager.webapp.address=localhost:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 # YARN3 yarn3.num.node.managers=1 @@ -109,9 +114,15 @@ yarn3.resource.manager.resource.tracker.address=localhost:37003 yarn3.resource.manager.webapp.address=localhost:37004 yarn3.use.in.jvm.container.executor=false +yarn3.resource.manager.client.address=localhost:37001 +yarn3.resource.manager.client.hostname=localhost +yarn3.resource.manager.scheduler.client.address=localhost:37002 
+yarn3.resource.manager.resource.tracker.client.address=localhost:37003 +yarn3.resource.manager.webapp.client.address=localhost:37004 # MR mr.job.history.address=localhost:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp diff --git a/hadoop-unit-yarn3/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrap3Test.java b/hadoop-unit-yarn3/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrap3Test.java index b173a89b..8560d482 100644 --- a/hadoop-unit-yarn3/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrap3Test.java +++ b/hadoop-unit-yarn3/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrap3Test.java @@ -47,10 +47,10 @@ public void testYarnLocalClusterIntegrationTest() { args[0] = "whoami"; args[1] = "1"; args[2] = getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString(); - args[3] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_ADDRESS_KEY); - args[4] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_HOSTNAME_KEY); - args[5] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY); - args[6] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY); + args[3] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_ADDRESS_CLIENT_KEY); + args[4] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_HOSTNAME_CLIENT_KEY); + args[5] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_SCHEDULER_ADDRESS_CLIENT_KEY); + args[6] = configuration.getString(Yarn3Config.YARN3_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_CLIENT_KEY); try { diff --git a/hadoop-unit-zookeeper/src/main/java/fr/jetoile/hadoopunit/component/ZookeeperConfig.java b/hadoop-unit-zookeeper/src/main/java/fr/jetoile/hadoopunit/component/ZookeeperConfig.java index d22b5bf5..b36780e1 100644 --- a/hadoop-unit-zookeeper/src/main/java/fr/jetoile/hadoopunit/component/ZookeeperConfig.java +++ 
b/hadoop-unit-zookeeper/src/main/java/fr/jetoile/hadoopunit/component/ZookeeperConfig.java @@ -27,5 +27,6 @@ public class ZookeeperConfig { public static final String ZOOKEEPER_MAX_CLIENT_CNXNS_KEY = "zookeeper.max.client.cnxns"; public static final String ZOOKEEPER_CONNECTION_STRING_KEY = "zookeeper.connection.string"; + public static final String ZOOKEEPER_HOST_CLIENT_KEY = "zookeeper.client.host"; private ZookeeperConfig() {} } diff --git a/hadoop-unit-zookeeper/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-zookeeper/src/main/resources/hadoop-unit-default.properties index c0077bee..b9f4fb2c 100644 --- a/hadoop-unit-zookeeper/src/main/resources/hadoop-unit-default.properties +++ b/hadoop-unit-zookeeper/src/main/resources/hadoop-unit-default.properties @@ -6,6 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir diff --git a/hadoop-unit-zookeeper/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-zookeeper/src/test/resources/hadoop-unit-default.properties index c0077bee..b9f4fb2c 100644 --- a/hadoop-unit-zookeeper/src/test/resources/hadoop-unit-default.properties +++ b/hadoop-unit-zookeeper/src/test/resources/hadoop-unit-default.properties @@ -6,6 +6,7 @@ tmp.dir.path=/tmp zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 +zookeeper.client.host=127.0.0.1 # Hive hive.scratch.dir=/hive_scratch_dir diff --git a/sample/confluent-integrationtest/src/test/resources/hadoop-unit-default.properties b/sample/confluent-integrationtest/src/test/resources/hadoop-unit-default.properties index 5f4a8e75..50baebf1 100644 --- a/sample/confluent-integrationtest/src/test/resources/hadoop-unit-default.properties +++ b/sample/confluent-integrationtest/src/test/resources/hadoop-unit-default.properties @@ -7,17 +7,20 @@ zookeeper.temp.dir=/embedded_zk zookeeper.host=127.0.0.1 zookeeper.port=22010 
+zookeeper.client.host=127.0.0.1 + + # Hive hive.scratch.dir=/hive_scratch_dir hive.warehouse.dir=/tmp/warehouse_dir # Hive Metastore -hive.metastore.hostname=localhost +hive.metastore.hostname=127.0.0.1 hive.metastore.port=20102 hive.metastore.derby.db.dir=/metastore_db # Hive Server2 -hive.server2.hostname=localhost +hive.server2.hostname=127.0.0.1 hive.server2.port=20103 # Hive Test @@ -25,8 +28,26 @@ hive.test.database.name=default hive.test.table.name=test_table +# Hive3 +hive3.scratch.dir=/hive_scratch_dir +hive3.warehouse.dir=/tmp/warehouse_dir + +# Hive Metastore 3 +hive3.metastore.hostname=localhost +hive3.metastore.port=20102 +hive3.metastore.derby.db.dir=metastore_db + +# Hive Server2 3 +hive3.server2.hostname=localhost +hive3.server2.port=20103 + +# Hive Test 3 +hive3.test.database.name=default +hive3.test.table.name=test_table + + # HDFS -hdfs.namenode.host=localhost +hdfs.namenode.host=127.0.0.1 hdfs.namenode.port=20112 hdfs.namenode.http.port=50070 hdfs.temp.dir=/embedded_hdfs @@ -34,11 +55,41 @@ hdfs.num.datanodes=1 hdfs.enable.permissions=false hdfs.format=true hdfs.enable.running.user.as.proxy.user=true +hdfs.datanode.address=127.0.0.1:50010 +hdfs.datanode.http.address=127.0.0.1:50075 +hdfs.datanode.ipc.address=127.0.0.1:50020 # HDFS Test hdfs.test.file=/tmp/testing hdfs.test.string=TESTING +hdfs.namenode.client.host=127.0.0.1 +hdfs.datanode.client.address=127.0.0.1:50010 +hdfs.datanode.http.client.address=127.0.0.1:50075 +hdfs.datanode.ipc.client.address=127.0.0.1:50020 + + +# HDFS3 +hdfs3.namenode.host=127.0.0.1 +hdfs3.namenode.port=20112 +hdfs3.namenode.http.port=50070 +hdfs3.temp.dir=/embedded_hdfs +hdfs3.num.datanodes=1 +hdfs3.enable.permissions=false +hdfs3.format=true +hdfs3.enable.running.user.as.proxy.user=true +hdfs3.datanode.address=127.0.0.1:50010 +hdfs3.datanode.http.address=127.0.0.1:50075 +hdfs3.datanode.ipc.address=127.0.0.1:50020 + +# HDFS3 Test +hdfs3.test.file=/tmp/testing +hdfs3.test.string=TESTING + 
+hdfs3.namenode.client.host=127.0.0.1 +hdfs3.datanode.client.address=127.0.0.1:50010 +hdfs3.datanode.http.client.address=127.0.0.1:50075 +hdfs3.datanode.ipc.client.address=127.0.0.1:50020 # HBase hbase.master.port=25111 @@ -56,6 +107,8 @@ hbase.rest.host=0.0.0.0 hbase.rest.threads.max=100 hbase.rest.threads.min=2 +hbase.rest.client.host=127.0.0.1 + # HBase Test hbase.test.table.name=hbase_test_table hbase.test.col.family.name=cf1 @@ -90,15 +143,40 @@ solr.cloud.port=8983 yarn.num.node.managers=1 yarn.num.local.dirs=1 yarn.num.log.dirs=1 -yarn.resource.manager.address=localhost:37001 -yarn.resource.manager.hostname=localhost -yarn.resource.manager.scheduler.address=localhost:37002 -yarn.resource.manager.resource.tracker.address=localhost:37003 -yarn.resource.manager.webapp.address=localhost:37004 +yarn.resource.manager.address=127.0.0.1:37001 +yarn.resource.manager.hostname=127.0.0.1 +yarn.resource.manager.scheduler.address=127.0.0.1:37002 +yarn.resource.manager.resource.tracker.address=127.0.0.1:37003 +yarn.resource.manager.webapp.address=127.0.0.1:37004 yarn.use.in.jvm.container.executor=false +yarn.resource.manager.client.address=localhost:37001 +yarn.resource.manager.client.hostname=localhost +yarn.resource.manager.scheduler.client.address=localhost:37002 +yarn.resource.manager.resource.tracker.client.address=localhost:37003 +yarn.resource.manager.webapp.client.address=localhost:37004 + + +# YARN3 +yarn3.num.node.managers=1 +yarn3.num.local.dirs=1 +yarn3.num.log.dirs=1 +yarn3.resource.manager.address=localhost:37001 +yarn3.resource.manager.hostname=localhost +yarn3.resource.manager.scheduler.address=localhost:37002 +yarn3.resource.manager.resource.tracker.address=localhost:37003 +yarn3.resource.manager.webapp.address=localhost:37004 +yarn3.use.in.jvm.container.executor=false + +yarn3.resource.manager.client.address=localhost:37001 +yarn3.resource.manager.client.hostname=localhost +yarn3.resource.manager.scheduler.client.address=localhost:37002 
+yarn3.resource.manager.resource.tracker.client.address=localhost:37003 +yarn3.resource.manager.webapp.client.address=localhost:37004 + # MR -mr.job.history.address=localhost:37005 +mr.job.history.address=127.0.0.1:37005 +mr.job.history.client.address=127.0.0.1:37005 # Oozie oozie.tmp.dir=/oozie_tmp @@ -110,15 +188,17 @@ oozie.hdfs.share.lib.dir=/tmp/share_lib oozie.share.lib.create=true oozie.local.share.lib.cache.dir=/tmp/share_lib_cache oozie.purge.local.share.lib.cache=false -oozie.sharelib.path=/home/khanh/github -oozie.sharelib.name=oozie-4.2.0.2.6.1.0-129-distro.tar.gz +oozie.sharelib.path=~/github +oozie.sharelib.name=oozie-4.2.0.2.6.5.0-292-distro.tar.gz oozie.port=20113 -oozie.host=localhost +oozie.host=127.0.0.1 oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING,SPARK #oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK +oozie.client.host=localhost + # ElasticSearch -elasticsearch.version=6.2.4 +elasticsearch.version=6.7.1 elasticsearch.ip=127.0.0.1 elasticsearch.http.port=14433 elasticsearch.tcp.port=14533 @@ -143,7 +223,7 @@ neo4j.port=13533 neo4j.temp.dir=/embedded_neo4j # KNOX -knox.host=localhost +knox.host=127.0.0.1 knox.port=8888 knox.path=gateway knox.cluster=mycluster @@ -153,8 +233,8 @@ knox.service=namenode,webhdfs,webhbase # Alluxio #alluxio.work.dir=/tmp/alluxio -alluxio.work.dir=hdfs://localhost:20112/alluxio -alluxio.hostname=localhost +alluxio.work.dir=hdfs://127.0.0.1:20112/alluxio +alluxio.hostname=127.0.0.1 alluxio.master.port=14001 alluxio.master.web.port=14002 alluxio.proxy.web.port=14100 @@ -167,7 +247,7 @@ alluxio.webapp.directory=conf/alluxio/webapp # Redis redis.port=6379 redis.download.url=http://download.redis.io/releases/ -redis.version=4.0.0 +redis.version=5.0.4 redis.cleanup.installation=false redis.temp.dir=/redis redis.type=SERVER @@ -195,4 +275,35 @@ confluent.rest.host=127.0.0.1 confluent.rest.port=8082 confluent.ksql.host=127.0.0.1 -confluent.ksql.port=8083 \ No newline at end 
of file +confluent.ksql.port=8083 + +# Docker +docker.imagename=alpine:3.2 +docker.exposedports=80 +docker.envs=MAGIC_NUMBER:42 +docker.labels=MAGIC_NUMBER:42 +docker.command=/bin/sh, -c, while true; do echo \"$MAGIC_NUMBER\" | nc -l -p 80; done +docker.fixed.exposedports=21300:80 +#docker.classpath.resources.mapping=hadoop-unit-default.properties:/hadoop-unit-default.properties:READ_ONLY + +# Docker compose +dockercompose.filename=conf/docker-compose.yml +#dockercompose.exposedports=zoo:2181,resourcemanager:8088 +dockercompose.local=false + + + +# Pulsar +pulsar.zookeeper.temp.dir=/pulsar/embedded_zk +pulsar.zookeeper.port=22020 +pulsar.ip=127.0.0.1 +pulsar.port=22022 +pulsar.temp.dir=/pulsar + +# BookKeeper +bookkeeper.ip=127.0.0.1 +bookkeeper.port=31810 +bookkeeper.http.port=31900 +bookkeeper.temp.dir=/bookeeper + +bookkeeper.client.ip=127.0.0.1