diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/pom.xml b/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/pom.xml
new file mode 100755
index 00000000..b7037b85
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/pom.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-bootstrap-client</artifactId>
+        <groupId>fr.jetoile.sample</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-bootstrap-client-hdfs</artifactId>
+
+    <properties>
+        <hive.version>1.2.1</hive.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>fr.jetoile.sample</groupId>
+            <artifactId>hadoop-bootstrap-commons</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-client</artifactId>
+            <exclusions>
+                <exclusion>
+                    <artifactId>servlet-api</artifactId>
+                    <groupId>javax.servlet</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-common</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-jdbc</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-configuration</groupId>
+            <artifactId>commons-configuration</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.ninja-squad</groupId>
+            <artifactId>DbSetup</artifactId>
+            <version>${dbSetup.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.11</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.easytesting</groupId>
+            <artifactId>fest-assert</artifactId>
+            <version>1.4</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+            <version>1.8.5</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/src/main/java/fr/jetoile/hadoop/test/hdfs/HdfsUtils.java b/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/src/main/java/fr/jetoile/hadoop/test/hdfs/HdfsUtils.java
new file mode 100644
index 00000000..5268fa81
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hdfs/src/main/java/fr/jetoile/hadoop/test/hdfs/HdfsUtils.java
@@ -0,0 +1,61 @@
+package fr.jetoile.hadoop.test.hdfs;
+
+
+import fr.jetoile.sample.Config;
+import fr.jetoile.sample.exception.ConfigException;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.apache.hadoop.fs.FileSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+
+public enum HdfsUtils {
+ INSTANCE;
+
+ private final Logger LOGGER = LoggerFactory.getLogger(HdfsUtils.class);
+
+ private FileSystem fileSystem = null;
+
+ private Configuration configuration;
+ private int port;
+ private int httpPort;
+
+ HdfsUtils() {
+ try {
+ loadConfig();
+ } catch (ConfigException e) {
+ LOGGER.error("unable to load configuration", e);
+ System.exit(-1);
+ }
+ org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
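+ // "fs.default.name" is the deprecated alias of "fs.defaultFS"; it points this client at the embedded namenode.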
+ conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+
+ URI uri = URI.create("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+
+ try {
+ fileSystem = FileSystem.get(uri, conf);
+ } catch (IOException e) {
+ LOGGER.error("unable to create FileSystem", e);
+ }
+ }
+
+ private void loadConfig() throws ConfigException {
+ try {
+ configuration = new PropertiesConfiguration("default.properties");
+ } catch (ConfigurationException e) {
+ throw new ConfigException("bad config", e);
+ }
+
+ port = configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY);
+ httpPort = configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY);
+ }
+
+
+ public FileSystem getFileSystem() {
+ return fileSystem;
+ }
+
+}
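For illustration, a minimal sketch of how a test might use this singleton, assuming the embedded HDFS started by the bootstrap components is running and a default.properties is on the classpath (the path and payload below are made up):

```java
import fr.jetoile.hadoop.test.hdfs.HdfsUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsClientExample {
    public static void main(String[] args) throws Exception {
        // The enum singleton reads default.properties and targets the embedded namenode.
        FileSystem fs = HdfsUtils.INSTANCE.getFileSystem();

        Path path = new Path("/khanh/test/example.txt"); // illustrative path
        try (FSDataOutputStream out = fs.create(path)) {
            out.writeUTF("hello");
        }
        try (FSDataInputStream in = fs.open(path)) {
            System.out.println(in.readUTF());
        }
    }
}
```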
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/pom.xml b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/pom.xml
new file mode 100755
index 00000000..0deac59e
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/pom.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-bootstrap-client</artifactId>
+        <groupId>fr.jetoile.sample</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-bootstrap-client-hive</artifactId>
+
+    <properties>
+        <hive.version>1.2.1</hive.version>
+
+        <solr.version>5.2.1</solr.version>
+        <oozie.version>4.2.0.2.3.2.0-2950</oozie.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>fr.jetoile.sample</groupId>
+            <artifactId>hadoop-bootstrap-commons</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-configuration</groupId>
+            <artifactId>commons-configuration</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-collections</groupId>
+            <artifactId>commons-collections</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.ninja-squad</groupId>
+            <artifactId>DbSetup</artifactId>
+            <version>${dbSetup.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.11</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.easytesting</groupId>
+            <artifactId>fest-assert</artifactId>
+            <version>1.4</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-all</artifactId>
+            <version>1.8.5</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hive</groupId>
+            <artifactId>hive-jdbc</artifactId>
+            <version>${hive.version}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveConnectionUtils.java b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveConnectionUtils.java
new file mode 100644
index 00000000..e502e5ec
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveConnectionUtils.java
@@ -0,0 +1,66 @@
+package fr.jetoile.hadoop.test.hive;
+
+import com.ninja_squad.dbsetup.destination.Destination;
+import com.ninja_squad.dbsetup.destination.DriverManagerDestination;
+import fr.jetoile.sample.Config;
+import fr.jetoile.sample.exception.ConfigException;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+/**
+ * Utility class for use with a default.properties file that contains the connection parameters.
+ */
+public enum HiveConnectionUtils {
+ INSTANCE;
+
+ final private Logger LOGGER = LoggerFactory.getLogger(HiveConnectionUtils.class);
+
+ private DriverManagerDestination driverManagerDestination;
+ private Configuration configuration;
+ private String databaseName;
+ private String host;
+ private int port;
+
+ HiveConnectionUtils() {
+ try {
+ loadConfig();
+ } catch (ConfigException e) {
+ LOGGER.error("unable to load configuration", e);
+ System.exit(-1);
+ }
+ driverManagerDestination = new DriverManagerDestination(
+ "jdbc:hive2://" + host + ":" + port + "/" + databaseName,
+ "user",
+ "pass");
+ }
+
+ private void loadConfig() throws ConfigException {
+ try {
+ configuration = new PropertiesConfiguration("default.properties");
+ } catch (ConfigurationException e) {
+ throw new ConfigException("bad config", e);
+ }
+
+ port = configuration.getInt(Config.HIVE_SERVER2_PORT_KEY);
+ host = configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY);
+ databaseName = configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY);
+ }
+
+ public Destination getDestination() {
+ return driverManagerDestination;
+ }
+
+ public Connection getConnection() {
+ try {
+ return driverManagerDestination.getConnection();
+ } catch (SQLException e) {
+ LOGGER.error("unable to create hive connection", e);
+ return null;
+ }
+ }
+}
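A sketch of the intended flow, combining this class with the HiveSetup wrapper introduced later in this change (it assumes the HiveServer2 started by the bootstrap components is up and that default.properties is on the classpath; the table name is illustrative):

```java
import com.ninja_squad.dbsetup.operation.Operation;
import fr.jetoile.hadoop.test.hive.HiveConnectionUtils;
import fr.jetoile.hadoop.test.hive.HiveSetup;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

import static com.ninja_squad.dbsetup.Operations.sequenceOf;
import static com.ninja_squad.dbsetup.Operations.sql;

public class HiveClientExample {
    public static void main(String[] args) throws Exception {
        // Run DDL through the Hive-aware DbSetup wrapper.
        Operation ddl = sequenceOf(
                sql("CREATE TABLE IF NOT EXISTS sample (id INT, msg STRING)")); // illustrative table
        new HiveSetup(HiveConnectionUtils.INSTANCE.getDestination(), ddl).launch();

        // Reuse the same destination for plain JDBC queries.
        try (Connection connection = HiveConnectionUtils.INSTANCE.getConnection();
             Statement stmt = connection.createStatement();
             ResultSet rs = stmt.executeQuery("SHOW TABLES")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```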
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveDriverManagerDestination.java b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveDriverManagerDestination.java
new file mode 100644
index 00000000..d81566d4
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveDriverManagerDestination.java
@@ -0,0 +1,103 @@
+/*
+ * The MIT License
+ *
+ * Copyright (c) 2012, Ninja Squad
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+package fr.jetoile.hadoop.test.hive;
+
+import com.ninja_squad.dbsetup.destination.Destination;
+import com.ninja_squad.dbsetup.util.Preconditions;
+
+import javax.annotation.Nonnull;
+import javax.annotation.concurrent.Immutable;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+/**
+ * A destination which uses the {@link DriverManager} to get a connection
+ * @author JB Nizet
+ */
+@Immutable
+public final class HiveDriverManagerDestination implements Destination {
+
+ private final String url;
+ private final String user;
+ private final String password;
+
+ /**
+ * Constructor
+ * @param url the URL of the database
+ * @param user the user used to get a connection
+ * @param password the password used to get a connection
+ */
+ public HiveDriverManagerDestination(@Nonnull String url, String user, String password) {
+ Preconditions.checkNotNull(url, "url may not be null");
+ this.url = url;
+ this.user = user;
+ this.password = password;
+ }
+
+ @Override
+ public Connection getConnection() throws SQLException {
+ try {
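+ // Older hive-jdbc jars may not register the driver through the JDBC 4 ServiceLoader mechanism, so load it explicitly.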
+ Class.forName("org.apache.hive.jdbc.HiveDriver");
+ } catch (ClassNotFoundException e) {
+ throw new SQLException("Hive JDBC driver not found on the classpath", e);
+ }
+ return DriverManager.getConnection(url, user, password);
+ }
+
+ @Override
+ public String toString() {
+ return "DriverManagerDestination [url="
+ + url
+ + ", user="
+ + user
+ + ", password="
+ + password
+ + "]";
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + url.hashCode();
+ result = prime * result + ((password == null) ? 0 : password.hashCode());
+ result = prime * result + ((user == null) ? 0 : user.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof HiveDriverManagerDestination)) return false;
+
+ HiveDriverManagerDestination that = (HiveDriverManagerDestination) o;
+
+ if (url != null ? !url.equals(that.url) : that.url != null) return false;
+ if (user != null ? !user.equals(that.user) : that.user != null) return false;
+ return password != null ? password.equals(that.password) : that.password == null;
+
+ }
+}
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveSetup.java b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveSetup.java
new file mode 100644
index 00000000..2829b152
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/java/fr/jetoile/hadoop/test/hive/HiveSetup.java
@@ -0,0 +1,111 @@
+package fr.jetoile.hadoop.test.hive;
+
+import com.ninja_squad.dbsetup.DbSetupRuntimeException;
+import com.ninja_squad.dbsetup.bind.BinderConfiguration;
+import com.ninja_squad.dbsetup.bind.DefaultBinderConfiguration;
+import com.ninja_squad.dbsetup.destination.Destination;
+import com.ninja_squad.dbsetup.operation.Operation;
+import com.ninja_squad.dbsetup.util.Preconditions;
+
+import javax.annotation.Nonnull;
+import java.sql.Connection;
+import java.sql.SQLException;
+
+public class HiveSetup {
+ private final Destination destination;
+ private final Operation operation;
+ private final BinderConfiguration binderConfiguration;
+
+ /**
+ * Constructor which uses the {@link DefaultBinderConfiguration#INSTANCE default binder configuration}.
+ * @param destination the destination of the sequence of database operations
+ * @param operation the operation to execute (most of the time, an instance of
+ * {@link com.ninja_squad.dbsetup.operation.CompositeOperation})
+ */
+ public HiveSetup(@Nonnull Destination destination, @Nonnull Operation operation) {
+ this(destination, operation, DefaultBinderConfiguration.INSTANCE);
+ }
+
+ /**
+ * Constructor allowing to use a custom {@link BinderConfiguration}.
+ * @param destination the destination of the sequence of database operations
+ * @param operation the operation to execute (most of the time, an instance of
+ * {@link com.ninja_squad.dbsetup.operation.CompositeOperation})
+ * @param binderConfiguration the binder configuration to use.
+ */
+ public HiveSetup(@Nonnull Destination destination,
+ @Nonnull Operation operation,
+ @Nonnull BinderConfiguration binderConfiguration) {
+ Preconditions.checkNotNull(destination, "destination may not be null");
+ Preconditions.checkNotNull(operation, "operation may not be null");
+ Preconditions.checkNotNull(binderConfiguration, "binderConfiguration may not be null");
+
+ this.destination = destination;
+ this.operation = operation;
+ this.binderConfiguration = binderConfiguration;
+ }
+
+ /**
+ * Executes the sequence of operations. All the operations use the same connection. Unlike the
+ * stock DbSetup runner, no transaction is demarcated: Hive does not support them, so the
+ * commit/rollback calls below are commented out and statements run in auto-commit mode.
+ */
+ public void launch() {
+ try {
+ Connection connection = destination.getConnection();
+ try {
+// connection.setAutoCommit(false);
+ operation.execute(connection, binderConfiguration);
+// connection.commit();
+ }
+ catch (SQLException e) {
+// connection.rollback();
+ throw e;
+ }
+ catch (RuntimeException e) {
+// connection.rollback();
+ throw e;
+ }
+ finally {
+ connection.close();
+ }
+ }
+ catch (SQLException e) {
+ throw new DbSetupRuntimeException(e);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "HiveSetup [destination="
+ + destination
+ + ", operation="
+ + operation
+ + ", binderConfiguration="
+ + binderConfiguration
+ + "]";
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + binderConfiguration.hashCode();
+ result = prime * result + destination.hashCode();
+ result = prime * result + operation.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (!(o instanceof HiveSetup)) return false;
+
+ HiveSetup hiveSetup = (HiveSetup) o;
+
+ if (destination != null ? !destination.equals(hiveSetup.destination) : hiveSetup.destination != null)
+ return false;
+ if (operation != null ? !operation.equals(hiveSetup.operation) : hiveSetup.operation != null) return false;
+ return binderConfiguration != null ? binderConfiguration.equals(hiveSetup.binderConfiguration) : hiveSetup.binderConfiguration == null;
+
+ }
+}
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/test/java/fr/jetoile/hadoop/test/hive/HiveSetupTest.java b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/test/java/fr/jetoile/hadoop/test/hive/HiveSetupTest.java
new file mode 100644
index 00000000..8a049cb2
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-hive/src/main/test/java/fr/jetoile/hadoop/test/hive/HiveSetupTest.java
@@ -0,0 +1,45 @@
+package fr.jetoile.hadoop.test.hive;
+
+import com.ninja_squad.dbsetup.Operations;
+import com.ninja_squad.dbsetup.destination.Destination;
+import com.ninja_squad.dbsetup.operation.Operation;
+import org.junit.After;
+import org.junit.Before;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+import static com.ninja_squad.dbsetup.Operations.sequenceOf;
+import static com.ninja_squad.dbsetup.Operations.sql;
+
+/**
+ * User: khanh
+ * To change this template use File | Settings | File Templates.
+ */
+public class HiveSetupTest {
+ public static final Operation CREATE_TABLES =
+ sequenceOf(sql("CREATE EXTERNAL TABLE IF NOT EXISTS default.test(id INT, value STRING) " +
+ " ROW FORMAT DELIMITED FIELDS TERMINATED BY ';'" +
+ " STORED AS TEXTFILE" +
+ " LOCATION 'hdfs://localhost:20112/khanh/test'"));
+
+ public static Destination DESTINATION = new HiveDriverManagerDestination(
+ "jdbc:hive2://localhost:20103/default",
+ "user",
+ "pass"
+ );
+
+ private Connection connection;
+
+ @Before
+ public void prepare() throws SQLException {
+ new HiveSetup(DESTINATION, Operations.sequenceOf(CREATE_TABLES)).launch();
+ connection = DESTINATION.getConnection();
+ }
+
+ @After
+ public void cleanup() throws SQLException {
+ connection.close();
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-solrcloud/pom.xml b/hadoop-bootstrap-client/hadoop-bootstrap-client-solrcloud/pom.xml
new file mode 100644
index 00000000..5416e186
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-solrcloud/pom.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-bootstrap-client</artifactId>
+        <groupId>fr.jetoile.sample</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-bootstrap-client-solrcloud</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.lucidworks.spark</groupId>
+            <artifactId>spark-solr</artifactId>
+            <version>1.1.2</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.ow2.asm</groupId>
+                    <artifactId>asm</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.ow2.asm</groupId>
+                    <artifactId>asm-commons</artifactId>
+                </exclusion>
+                <exclusion>
+                    <artifactId>spark-mllib_2.10</artifactId>
+                    <groupId>org.apache.spark</groupId>
+                </exclusion>
+                <exclusion>
+                    <artifactId>spark-streaming-twitter_2.10</artifactId>
+                    <groupId>org.apache.spark</groupId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.scala-lang</groupId>
+                    <artifactId>scala-library</artifactId>
+                </exclusion>
+                <exclusion>
+                    <artifactId>hadoop-hdfs</artifactId>
+                    <groupId>org.apache.hadoop</groupId>
+                </exclusion>
+                <exclusion>
+                    <artifactId>hadoop-client</artifactId>
+                    <groupId>org.apache.hadoop</groupId>
+                </exclusion>
+                <exclusion>
+                    <artifactId>solr-core</artifactId>
+                    <groupId>org.apache.solr</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/hadoop-bootstrap-client/hadoop-bootstrap-client-spark/pom.xml b/hadoop-bootstrap-client/hadoop-bootstrap-client-spark/pom.xml
new file mode 100644
index 00000000..b58ac77e
--- /dev/null
+++ b/hadoop-bootstrap-client/hadoop-bootstrap-client-spark/pom.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-bootstrap-client</artifactId>
+        <groupId>fr.jetoile.sample</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-bootstrap-client-spark</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.spark</groupId>
+            <artifactId>spark-core_2.10</artifactId>
+            <version>1.6.0</version>
+            <exclusions>
+                <exclusion>
+                    <artifactId>hadoop-client</artifactId>
+                    <groupId>org.apache.hadoop</groupId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.spark</groupId>
+            <artifactId>spark-sql_2.10</artifactId>
+            <version>1.6.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.spark</groupId>
+            <artifactId>spark-hive_2.10</artifactId>
+            <version>1.6.0</version>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/hadoop-bootstrap-client/pom.xml b/hadoop-bootstrap-client/pom.xml
new file mode 100755
index 00000000..3ab47625
--- /dev/null
+++ b/hadoop-bootstrap-client/pom.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-bootstrap</artifactId>
+        <groupId>fr.jetoile.sample</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-bootstrap-client</artifactId>
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>hadoop-bootstrap-client-hive</module>
+        <module>hadoop-bootstrap-client-hdfs</module>
+        <module>hadoop-bootstrap-client-solrcloud</module>
+        <module>hadoop-bootstrap-client-spark</module>
+    </modules>
+
+</project>
diff --git a/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/Config.java b/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/Config.java
new file mode 100644
index 00000000..76382166
--- /dev/null
+++ b/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/Config.java
@@ -0,0 +1,124 @@
+package fr.jetoile.sample;
+
+public class Config {
+ // Props file
+ public static final String DEFAULT_PROPS_FILE = "default.properties";
+
+ // Zookeeper
+ public static final String ZOOKEEPER_PORT_KEY = "zookeeper.port";
+ public static final String ZOOKEEPER_HOST_KEY = "zookeeper.host";
+ public static final String ZOOKEEPER_TEMP_DIR_KEY = "zookeeper.temp.dir";
+ public static final String ZOOKEEPER_CONNECTION_STRING_KEY = "zookeeper.connection.string";
+
+ // MongoDB
+ public static final String MONGO_IP_KEY = "mongo.ip";
+ public static final String MONGO_PORT_KEY = "mongo.port";
+ public static final String MONGO_DATABASE_NAME_KEY = "mongo.database.name";
+ public static final String MONGO_COLLECTION_NAME_KEY = "mongo.collection.name";
+
+ // ActiveMQ
+ public static final String ACTIVEMQ_HOSTNAME_KEY = "activemq.hostname";
+ public static final String ACTIVEMQ_PORT_KEY = "activemq.port";
+ public static final String ACTIVEMQ_QUEUE_NAME_KEY = "activemq.queue";
+ public static final String ACTIVEMQ_STORE_DIR_KEY = "activemq.store.dir";
+ public static final String ACTIVEMQ_URI_PREFIX_KEY = "activemq.uri.prefix";
+ public static final String ACTIVEMQ_URI_POSTFIX_KEY = "activemq.uri.postfix";
+
+ // Storm
+ public static final String STORM_ENABLE_DEBUG_KEY = "storm.enable.debug";
+ public static final String STORM_NUM_WORKERS_KEY = "storm.num.workers";
+ public static final String STORM_TOPOLOGY_NAME_KEY = "storm.topology.name";
+
+ // Hive
+ public static final String HIVE_SCRATCH_DIR_KEY = "hive.scratch.dir";
+ public static final String HIVE_WAREHOUSE_DIR_KEY = "hive.warehouse.dir";
+
+ // Hive Metastore
+ public static final String HIVE_METASTORE_HOSTNAME_KEY = "hive.metastore.hostname";
+ public static final String HIVE_METASTORE_PORT_KEY = "hive.metastore.port";
+ public static final String HIVE_METASTORE_DERBY_DB_DIR_KEY = "hive.metastore.derby.db.dir";
+
+ // Hive Server2
+ public static final String HIVE_SERVER2_HOSTNAME_KEY = "hive.server2.hostname";
+ public static final String HIVE_SERVER2_PORT_KEY = "hive.server2.port";
+
+ // Hive Test
+ public static final String HIVE_TEST_DATABASE_NAME_KEY = "hive.test.database.name";
+ public static final String HIVE_TEST_TABLE_NAME_KEY = "hive.test.table.name";
+
+ // Kafka
+ public static final String KAFKA_HOSTNAME_KEY = "kafka.hostname";
+ public static final String KAFKA_PORT_KEY = "kafka.port";
+
+ // Kafka Test
+ public static final String KAFKA_TEST_TOPIC_KEY = "kafka.test.topic";
+ public static final String KAFKA_TEST_MESSAGE_COUNT_KEY = "kafka.test.message.count";
+ public static final String KAFKA_TEST_BROKER_ID_KEY = "kafka.test.broker.id";
+ public static final String KAFKA_TEST_TEMP_DIR_KEY = "kafka.test.temp.dir";
+
+ //HDFS
+ public static final String HDFS_NAMENODE_PORT_KEY = "hdfs.namenode.port";
+ public static final String HDFS_NAMENODE_HTTP_PORT_KEY = "hdfs.namenode.http.port";
+ public static final String HDFS_TEMP_DIR_KEY = "hdfs.temp.dir";
+ public static final String HDFS_NUM_DATANODES_KEY = "hdfs.num.datanodes";
+ public static final String HDFS_ENABLE_PERMISSIONS_KEY = "hdfs.enable.permissions";
+ public static final String HDFS_FORMAT_KEY = "hdfs.format";
+ public static final String HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER = "hdfs.enable.running.user.as.proxy.user";
+
+ // HDFS Test
+ public static final String HDFS_TEST_FILE_KEY = "hdfs.test.file";
+ public static final String HDFS_TEST_STRING_KEY = "hdfs.test.string";
+
+ // HSQLDB
+ public static final String HSQLDB_HOSTNAME_KEY = "hsqldb.hostname";
+ public static final String HSQLDB_PORT_KEY = "hsqldb.port";
+ public static final String HSQLDB_DATABASE_NAME_KEY = "hsqldb.database.name";
+ public static final String HSQLDB_TEMP_DIR_KEY = "hsqldb.temp.dir";
+ public static final String HSQLDB_COMPATIBILITY_MODE_KEY = "hsqldb.compatibility.mode";
+ public static final String HSQLDB_JDBC_DRIVER_KEY = "hsqldb.jdbc.driver";
+ public static final String HSQLDB_JDBC_CONNECTION_STRING_PREFIX_KEY = "hsqldb.jdbc.connection.string.prefix";
+
+ // YARN
+ public static final String YARN_NUM_NODE_MANAGERS_KEY = "yarn.num.node.managers";
+ public static final String YARN_NUM_LOCAL_DIRS_KEY = "yarn.num.local.dirs";
+ public static final String YARN_NUM_LOG_DIRS_KEY = "yarn.num.log.dirs";
+ public static final String YARN_RESOURCE_MANAGER_ADDRESS_KEY = "yarn.resource.manager.address";
+ public static final String YARN_RESOURCE_MANAGER_HOSTNAME_KEY = "yarn.resource.manager.hostname";
+ public static final String YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY = "yarn.resource.manager.scheduler.address";
+ public static final String YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY = "yarn.resource.manager.webapp.address";
+ public static final String YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY =
+ "yarn.resource.manager.resource.tracker.address";
+ public static final String YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY = "yarn.use.in.jvm.container.executor";
+
+ // MR
+ public static final String MR_JOB_HISTORY_ADDRESS_KEY = "mr.job.history.address";
+
+ // MR Test
+ public static final String MR_TEST_DATA_FILENAME_KEY = "mr.test.data.filename";
+ public static final String MR_TEST_DATA_HDFS_INPUT_DIR_KEY = "mr.test.data.hdfs.input.dir";
+ public static final String MR_TEST_DATA_HDFS_OUTPUT_DIR_KEY = "mr.test.data.hdfs.output.dir";
+
+ // HBase
+ public static final String HBASE_MASTER_PORT_KEY = "hbase.master.port";
+ public static final String HBASE_MASTER_INFO_PORT_KEY = "hbase.master.info.port";
+ public static final String HBASE_NUM_REGION_SERVERS_KEY = "hbase.num.region.servers";
+ public static final String HBASE_ROOT_DIR_KEY = "hbase.root.dir";
+ public static final String HBASE_ZNODE_PARENT_KEY = "hbase.znode.parent";
+ public static final String HBASE_WAL_REPLICATION_ENABLED_KEY = "hbase.wal.replication.enabled";
+
+ // HBase Test
+ public static final String HBASE_TEST_TABLE_NAME_KEY = "hbase.test.table.name";
+ public static final String HBASE_TEST_COL_FAMILY_NAME_KEY = "hbase.test.col.family.name";
+ public static final String HBASE_TEST_COL_QUALIFIER_NAME_KEY = "hbase.test.col.qualifier.name";
+ public static final String HBASE_TEST_NUM_ROWS_TO_PUT_KEY = "hbase.test.num.rows.to.put";
+
+ //Oozie
+ public static final String OOZIE_TEST_DIR_KEY = "oozie.test.dir";
+ public static final String OOZIE_HOME_DIR_KEY = "oozie.home.dir";
+ public static final String OOZIE_USERNAME_KEY = "oozie.username";
+ public static final String OOZIE_GROUPNAME_KEY = "oozie.groupname";
+ public static final String OOZIE_HDFS_SHARE_LIB_DIR_KEY = "oozie.hdfs.share.lib.dir";
+ public static final String OOZIE_SHARE_LIB_CREATE_KEY = "oozie.share.lib.create";
+ public static final String OOZIE_LOCAL_SHARE_LIB_CACHE_DIR_KEY = "oozie.local.share.lib.cache.dir";
+ public static final String OOZIE_PURGE_LOCAL_SHARE_LIB_CACHE_KEY = "oozie.purge.local.share.lib.cache";
+}
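These constants mirror the keys of default.properties; a client reads them the same way the bootstrap components do. A minimal sketch, assuming default.properties is on the classpath:

```java
import fr.jetoile.sample.Config;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;

public class ConfigExample {
    public static void main(String[] args) throws ConfigurationException {
        // Commons Configuration resolves the file from the classpath.
        Configuration configuration = new PropertiesConfiguration(Config.DEFAULT_PROPS_FILE);

        int namenodePort = configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY);
        String hiveHost = configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY);
        System.out.println("hdfs://127.0.0.1:" + namenodePort + ", hive2 on " + hiveHost);
    }
}
```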
diff --git a/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/exception/ConfigException.java b/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/exception/ConfigException.java
new file mode 100644
index 00000000..fdb7c3fc
--- /dev/null
+++ b/hadoop-bootstrap-commons/src/main/java/fr/jetoile/sample/exception/ConfigException.java
@@ -0,0 +1,7 @@
+package fr.jetoile.sample.exception;
+
+public class ConfigException extends Exception {
+ public ConfigException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/hadoop-bootstrap-hbase/src/main/java/fr/jetoile/sample/component/HBaseBootstrap.java b/hadoop-bootstrap-hbase/src/main/java/fr/jetoile/sample/component/HBaseBootstrap.java
index 3f9b283e..b13ae4c9 100644
--- a/hadoop-bootstrap-hbase/src/main/java/fr/jetoile/sample/component/HBaseBootstrap.java
+++ b/hadoop-bootstrap-hbase/src/main/java/fr/jetoile/sample/component/HBaseBootstrap.java
@@ -23,10 +23,10 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.HbaseLocalCluster;
import com.github.sakserv.minicluster.util.FileUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -77,7 +77,7 @@ private void init() {
private void build() {
org.apache.hadoop.conf.Configuration hbaseConfiguration = new org.apache.hadoop.conf.Configuration();
hbaseConfiguration.setBoolean("hbase.table.sanity.checks", false);
- hbaseConfiguration.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getString(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+ hbaseConfiguration.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getString(Config.HDFS_NAMENODE_PORT_KEY));
hbaseLocalCluster = new HbaseLocalCluster.Builder()
.setHbaseMasterPort(port)
@@ -100,14 +100,14 @@ private void loadConfig() throws BootstrapException {
throw new BootstrapException("bad config", e);
}
- port = configuration.getInt(ConfigVars.HBASE_MASTER_PORT_KEY);
- infoPort = configuration.getInt(ConfigVars.HBASE_MASTER_INFO_PORT_KEY);
- nbRegionServer = configuration.getInt(ConfigVars.HBASE_NUM_REGION_SERVERS_KEY);
- rootDirectory = configuration.getString(ConfigVars.HBASE_ROOT_DIR_KEY);
- zookeeperConnectionString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
- zookeeperPort = configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
- zookeeperZnodeParent = configuration.getString(ConfigVars.HBASE_ZNODE_PARENT_KEY);
- enableWalReplication = configuration.getBoolean(ConfigVars.HBASE_WAL_REPLICATION_ENABLED_KEY);
+ port = configuration.getInt(Config.HBASE_MASTER_PORT_KEY);
+ infoPort = configuration.getInt(Config.HBASE_MASTER_INFO_PORT_KEY);
+ nbRegionServer = configuration.getInt(Config.HBASE_NUM_REGION_SERVERS_KEY);
+ rootDirectory = configuration.getString(Config.HBASE_ROOT_DIR_KEY);
+ zookeeperConnectionString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
+ zookeeperPort = configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
+ zookeeperZnodeParent = configuration.getString(Config.HBASE_ZNODE_PARENT_KEY);
+ enableWalReplication = configuration.getBoolean(Config.HBASE_WAL_REPLICATION_ENABLED_KEY);
}
diff --git a/hadoop-bootstrap-hbase/src/test/java/fr/jetoile/sample/component/HBaseBootstrapTest.java b/hadoop-bootstrap-hbase/src/test/java/fr/jetoile/sample/component/HBaseBootstrapTest.java
index 0f4c9412..19e9839f 100644
--- a/hadoop-bootstrap-hbase/src/test/java/fr/jetoile/sample/component/HBaseBootstrapTest.java
+++ b/hadoop-bootstrap-hbase/src/test/java/fr/jetoile/sample/component/HBaseBootstrapTest.java
@@ -1,8 +1,8 @@
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -48,10 +48,10 @@ public static void tearDown() throws Exception {
@Test
public void hBaseShouldStart() throws Exception {
- String tableName = configuration.getString(ConfigVars.HBASE_TEST_TABLE_NAME_KEY);
- String colFamName = configuration.getString(ConfigVars.HBASE_TEST_COL_FAMILY_NAME_KEY);
- String colQualiferName = configuration.getString(ConfigVars.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
- Integer numRowsToPut = configuration.getInt(ConfigVars.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
+ String tableName = configuration.getString(Config.HBASE_TEST_TABLE_NAME_KEY);
+ String colFamName = configuration.getString(Config.HBASE_TEST_COL_FAMILY_NAME_KEY);
+ String colQualiferName = configuration.getString(Config.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
+ Integer numRowsToPut = configuration.getInt(Config.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
org.apache.hadoop.conf.Configuration hbaseConfiguration = HadoopBootstrap.INSTANCE.getService(Component.HBASE).getConfiguration();
LOGGER.info("HBASE: Creating table {} with column family {}", tableName, colFamName);
diff --git a/hadoop-bootstrap-hdfs/src/main/java/fr/jetoile/sample/component/HdfsBootstrap.java b/hadoop-bootstrap-hdfs/src/main/java/fr/jetoile/sample/component/HdfsBootstrap.java
index be6b6ba4..2579f846 100644
--- a/hadoop-bootstrap-hdfs/src/main/java/fr/jetoile/sample/component/HdfsBootstrap.java
+++ b/hadoop-bootstrap-hdfs/src/main/java/fr/jetoile/sample/component/HdfsBootstrap.java
@@ -23,9 +23,9 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.HdfsLocalCluster;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -96,13 +96,13 @@ private void loadConfig() throws BootstrapException {
throw new BootstrapException("bad config", e);
}
- port = configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY);
- httpPort = configuration.getInt(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY);
- tempDirectory = configuration.getString(ConfigVars.HDFS_TEMP_DIR_KEY);
- numDatanodes = configuration.getInt(ConfigVars.HDFS_NUM_DATANODES_KEY);
- enablePermission = configuration.getBoolean(ConfigVars.HDFS_ENABLE_PERMISSIONS_KEY);
- format = configuration.getBoolean(ConfigVars.HDFS_FORMAT_KEY);
- enableRunningUserAsProxy = configuration.getBoolean(ConfigVars.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER);
+ port = configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY);
+ httpPort = configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY);
+ tempDirectory = configuration.getString(Config.HDFS_TEMP_DIR_KEY);
+ numDatanodes = configuration.getInt(Config.HDFS_NUM_DATANODES_KEY);
+ enablePermission = configuration.getBoolean(Config.HDFS_ENABLE_PERMISSIONS_KEY);
+ format = configuration.getBoolean(Config.HDFS_FORMAT_KEY);
+ enableRunningUserAsProxy = configuration.getBoolean(Config.HDFS_ENABLE_RUNNING_USER_AS_PROXY_USER);
}
@Override
diff --git a/hadoop-bootstrap-hdfs/src/test/java/fr/jetoile/sample/component/HdfsBootstrapTest.java b/hadoop-bootstrap-hdfs/src/test/java/fr/jetoile/sample/component/HdfsBootstrapTest.java
index 4a3ba11f..49f9d295 100644
--- a/hadoop-bootstrap-hdfs/src/test/java/fr/jetoile/sample/component/HdfsBootstrapTest.java
+++ b/hadoop-bootstrap-hdfs/src/test/java/fr/jetoile/sample/component/HdfsBootstrapTest.java
@@ -1,9 +1,8 @@
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
-import com.github.sakserv.minicluster.util.WindowsLibsUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
import fr.jetoile.sample.Utils;
@@ -56,20 +55,20 @@ public void hdfsShouldStart() throws Exception {
// Write a file to HDFS containing the test string
FileSystem hdfsFsHandle = ((HdfsBootstrap)HadoopBootstrap.INSTANCE.getService(Component.HDFS)).getHdfsFileSystemHandle();
FSDataOutputStream writer = hdfsFsHandle.create(
- new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- writer.writeUTF(configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
writer.close();
// Read the file and compare to test string
FSDataInputStream reader = hdfsFsHandle.open(
- new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- assertEquals(reader.readUTF(), configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
reader.close();
hdfsFsHandle.close();
URL url = new URL(
String.format( "http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
- configuration.getInt( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
+ configuration.getInt( Config.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
URLConnection connection = url.openConnection();
connection.setRequestProperty( "Accept-Charset", "UTF-8" );
BufferedReader response = new BufferedReader( new InputStreamReader( connection.getInputStream() ) );
diff --git a/hadoop-bootstrap-hive/pom.xml b/hadoop-bootstrap-hive/pom.xml
index 48bdc3cd..273d290b 100644
--- a/hadoop-bootstrap-hive/pom.xml
+++ b/hadoop-bootstrap-hive/pom.xml
@@ -44,11 +44,6 @@
            <version>9.2.10.v20150310</version>
        </dependency>
-        <dependency>
-            <groupId>fr.jetoile.sample</groupId>
-            <artifactId>hadoop-bootstrap-test-utils</artifactId>
-            <scope>test</scope>
-        </dependency>
    </dependencies>
</project>
\ No newline at end of file
diff --git a/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveMetastoreBootstrap.java b/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveMetastoreBootstrap.java
index 6d1a0b0c..83efbcfe 100644
--- a/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveMetastoreBootstrap.java
+++ b/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveMetastoreBootstrap.java
@@ -23,11 +23,11 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.HiveLocalMetaStore;
import com.github.sakserv.minicluster.util.FileUtils;
import com.github.sakserv.minicluster.util.WindowsLibsUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -75,11 +75,11 @@ private void loadConfig() throws BootstrapException {
} catch (ConfigurationException e) {
throw new BootstrapException("bad config", e);
}
- host = configuration.getString(ConfigVars.HIVE_METASTORE_HOSTNAME_KEY);
- port = configuration.getInt(ConfigVars.HIVE_METASTORE_PORT_KEY);
- derbyDirectory = configuration.getString(ConfigVars.HIVE_METASTORE_DERBY_DB_DIR_KEY);
- scratchDirectory = configuration.getString(ConfigVars.HIVE_SCRATCH_DIR_KEY);
- warehouseDirectory = configuration.getString(ConfigVars.HIVE_WAREHOUSE_DIR_KEY);
+ host = configuration.getString(Config.HIVE_METASTORE_HOSTNAME_KEY);
+ port = configuration.getInt(Config.HIVE_METASTORE_PORT_KEY);
+ derbyDirectory = configuration.getString(Config.HIVE_METASTORE_DERBY_DB_DIR_KEY);
+ scratchDirectory = configuration.getString(Config.HIVE_SCRATCH_DIR_KEY);
+ warehouseDirectory = configuration.getString(Config.HIVE_WAREHOUSE_DIR_KEY);
}
diff --git a/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveServer2Bootstrap.java b/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveServer2Bootstrap.java
index eef3477f..94a98ad5 100644
--- a/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveServer2Bootstrap.java
+++ b/hadoop-bootstrap-hive/src/main/java/fr/jetoile/sample/component/HiveServer2Bootstrap.java
@@ -23,11 +23,11 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.HiveLocalServer2;
import com.github.sakserv.minicluster.util.FileUtils;
import com.github.sakserv.minicluster.util.WindowsLibsUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -79,14 +79,14 @@ private void loadConfig() throws BootstrapException {
} catch (ConfigurationException e) {
throw new BootstrapException("bad config", e);
}
- host = configuration.getString(ConfigVars.HIVE_SERVER2_HOSTNAME_KEY);
- port = configuration.getInt(ConfigVars.HIVE_SERVER2_PORT_KEY);
- hostMetastore = configuration.getString(ConfigVars.HIVE_METASTORE_HOSTNAME_KEY);
- portMetastore = configuration.getInt(ConfigVars.HIVE_METASTORE_PORT_KEY);
- derbyDirectory = configuration.getString(ConfigVars.HIVE_METASTORE_DERBY_DB_DIR_KEY);
- scratchDirectory = configuration.getString(ConfigVars.HIVE_SCRATCH_DIR_KEY);
- warehouseDirectory = configuration.getString(ConfigVars.HIVE_WAREHOUSE_DIR_KEY);
- zookeeperConnectionString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ host = configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY);
+ port = configuration.getInt(Config.HIVE_SERVER2_PORT_KEY);
+ hostMetastore = configuration.getString(Config.HIVE_METASTORE_HOSTNAME_KEY);
+ portMetastore = configuration.getInt(Config.HIVE_METASTORE_PORT_KEY);
+ derbyDirectory = configuration.getString(Config.HIVE_METASTORE_DERBY_DB_DIR_KEY);
+ scratchDirectory = configuration.getString(Config.HIVE_SCRATCH_DIR_KEY);
+ warehouseDirectory = configuration.getString(Config.HIVE_WAREHOUSE_DIR_KEY);
+ zookeeperConnectionString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
}
diff --git a/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveMetastoreBootstrapTest.java b/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveMetastoreBootstrapTest.java
index 5f4f1221..d7c8dc8a 100644
--- a/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveMetastoreBootstrapTest.java
+++ b/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveMetastoreBootstrapTest.java
@@ -1,8 +1,8 @@
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.Utils;
import fr.jetoile.sample.exception.BootstrapException;
@@ -65,8 +65,8 @@ public void hiveMetastoreShouldStart() throws InterruptedException, NotFoundServ
try {
HiveMetaStoreClient hiveClient = new HiveMetaStoreClient((HiveConf) HadoopBootstrap.INSTANCE.getService(Component.HIVEMETA).getConfiguration());
- hiveClient.dropTable(configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY),
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY),
+ hiveClient.dropTable(configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY),
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY),
true,
true);
@@ -76,7 +76,7 @@ public void hiveMetastoreShouldStart() throws InterruptedException, NotFoundServ
cols.add(new FieldSchema("msg", serdeConstants.STRING_TYPE_NAME, ""));
// Values for the StorageDescriptor
- String location = new File(configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY)).getAbsolutePath();
+ String location = new File(configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY)).getAbsolutePath();
String inputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
String outputFormat = "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat";
int numBuckets = 16;
@@ -100,8 +100,8 @@ public void hiveMetastoreShouldStart() throws InterruptedException, NotFoundServ
// Define the table
Table tbl = new Table();
- tbl.setDbName(configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY));
- tbl.setTableName(configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
+ tbl.setDbName(configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY));
+ tbl.setTableName(configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
tbl.setSd(sd);
tbl.setOwner(System.getProperty("user.name"));
tbl.setParameters(new HashMap<>());
@@ -117,9 +117,9 @@ public void hiveMetastoreShouldStart() throws InterruptedException, NotFoundServ
// Describe the table
Table createdTable = hiveClient.getTable(
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY),
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
- assertThat(createdTable.toString()).contains(configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY),
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
+ assertThat(createdTable.toString()).contains(configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
} catch (MetaException e) {
e.printStackTrace();
diff --git a/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveServer2BootstrapTest.java b/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveServer2BootstrapTest.java
index ca47764c..a8e07148 100644
--- a/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveServer2BootstrapTest.java
+++ b/hadoop-bootstrap-hive/src/test/java/fr/jetoile/sample/component/HiveServer2BootstrapTest.java
@@ -1,8 +1,7 @@
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
-import com.github.sakserv.minicluster.util.WindowsLibsUtils;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -58,9 +57,9 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
//
// Get the connection
Connection con = DriverManager.getConnection("jdbc:hive2://" +
- configuration.getString(ConfigVars.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
- configuration.getInt(ConfigVars.HIVE_SERVER2_PORT_KEY) + "/" +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY),
+ configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
+ configuration.getInt(Config.HIVE_SERVER2_PORT_KEY) + "/" +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY),
"user",
"pass");
@@ -68,7 +67,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
Statement stmt;
try {
String createDbDdl = "CREATE DATABASE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY);
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Create Database Statement: {}", createDbDdl);
stmt.execute(createDbDdl);
@@ -77,16 +76,16 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
}
// Drop the table incase it still exists
- String dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ String dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
// Create the ORC table
String createDdl = "CREATE TABLE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
"PARTITIONED BY (dt STRING) " +
"CLUSTERED BY (id) INTO 16 BUCKETS " +
"STORED AS ORC tblproperties(\"orc.compress\"=\"NONE\")";
@@ -97,7 +96,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
// Issue a describe on the new table and display the output
LOGGER.info("HIVE: Validating Table was Created: ");
ResultSet resultSet = stmt.executeQuery("DESCRIBE FORMATTED " +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
int count = 0;
while (resultSet.next()) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
@@ -110,8 +109,8 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
assertEquals(33, count);
// Drop the table
- dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
diff --git a/hadoop-bootstrap-kafka/src/main/java/fr/jetoile/sample/component/KafkaBootstrap.java b/hadoop-bootstrap-kafka/src/main/java/fr/jetoile/sample/component/KafkaBootstrap.java
index f3d9a335..f90d1e9f 100644
--- a/hadoop-bootstrap-kafka/src/main/java/fr/jetoile/sample/component/KafkaBootstrap.java
+++ b/hadoop-bootstrap-kafka/src/main/java/fr/jetoile/sample/component/KafkaBootstrap.java
@@ -23,9 +23,9 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.KafkaLocalBroker;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -91,11 +91,11 @@ private void loadConfig() throws BootstrapException {
} catch (ConfigurationException e) {
throw new BootstrapException("bad config", e);
}
- host = configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY);
- port = configuration.getInt(ConfigVars.KAFKA_PORT_KEY);
- brokerId = configuration.getInt(ConfigVars.KAFKA_TEST_BROKER_ID_KEY);
- tmpDirectory = configuration.getString(ConfigVars.KAFKA_TEST_TEMP_DIR_KEY);
- zookeeperConnectionString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ host = configuration.getString(Config.KAFKA_HOSTNAME_KEY);
+ port = configuration.getInt(Config.KAFKA_PORT_KEY);
+ brokerId = configuration.getInt(Config.KAFKA_TEST_BROKER_ID_KEY);
+ tmpDirectory = configuration.getString(Config.KAFKA_TEST_TEMP_DIR_KEY);
+ zookeeperConnectionString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
}
diff --git a/hadoop-bootstrap-kafka/src/test/java/fr/jetoile/sample/component/KafkaBootstrapTest.java b/hadoop-bootstrap-kafka/src/test/java/fr/jetoile/sample/component/KafkaBootstrapTest.java
index 992a0d8f..85c71475 100644
--- a/hadoop-bootstrap-kafka/src/test/java/fr/jetoile/sample/component/KafkaBootstrapTest.java
+++ b/hadoop-bootstrap-kafka/src/test/java/fr/jetoile/sample/component/KafkaBootstrapTest.java
@@ -1,7 +1,7 @@
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
import fr.jetoile.sample.kafka.consumer.KafkaTestConsumer;
@@ -48,27 +48,27 @@ public void kafkaShouldStart() throws Exception {
// Producer
KafkaTestProducer kafkaTestProducer = new KafkaTestProducer.Builder()
- .setKafkaHostname(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY))
- .setKafkaPort(configuration.getInt(ConfigVars.KAFKA_PORT_KEY))
- .setTopic(configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY))
- .setMessageCount(configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY))
+ .setKafkaHostname(configuration.getString(Config.KAFKA_HOSTNAME_KEY))
+ .setKafkaPort(configuration.getInt(Config.KAFKA_PORT_KEY))
+ .setTopic(configuration.getString(Config.KAFKA_TEST_TOPIC_KEY))
+ .setMessageCount(configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY))
.build();
kafkaTestProducer.produceMessages();
// Consumer
List<String> seeds = new ArrayList<String>();
- seeds.add(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY));
+ seeds.add(configuration.getString(Config.KAFKA_HOSTNAME_KEY));
KafkaTestConsumer kafkaTestConsumer = new KafkaTestConsumer();
kafkaTestConsumer.consumeMessages2(
- configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
- configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY),
+ configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ configuration.getString(Config.KAFKA_TEST_TOPIC_KEY),
0,
seeds,
- configuration.getInt(ConfigVars.KAFKA_PORT_KEY));
+ configuration.getInt(Config.KAFKA_PORT_KEY));
// Assert num of messages produced = num of message consumed
- Assert.assertEquals(configuration.getLong(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ Assert.assertEquals(configuration.getLong(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
kafkaTestConsumer.getNumRead());
}
}
diff --git a/hadoop-bootstrap-oozie/src/main/java/fr/jetoile/sample/component/OozieBootstrap.java b/hadoop-bootstrap-oozie/src/main/java/fr/jetoile/sample/component/OozieBootstrap.java
index aebe6c3f..1e3db990 100644
--- a/hadoop-bootstrap-oozie/src/main/java/fr/jetoile/sample/component/OozieBootstrap.java
+++ b/hadoop-bootstrap-oozie/src/main/java/fr/jetoile/sample/component/OozieBootstrap.java
@@ -23,11 +23,11 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.MRLocalCluster;
import com.github.sakserv.minicluster.impl.OozieLocalServer;
import com.github.sakserv.minicluster.util.FileUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
@@ -156,25 +156,25 @@ private void loadConfig() throws BootstrapException, NotFoundServiceException {
throw new BootstrapException("bad config", e);
}
- oozieTestDir = configuration.getString(ConfigVars.OOZIE_TEST_DIR_KEY);
- oozieHomeDir = configuration.getString(ConfigVars.OOZIE_HOME_DIR_KEY);
+ oozieTestDir = configuration.getString(Config.OOZIE_TEST_DIR_KEY);
+ oozieHomeDir = configuration.getString(Config.OOZIE_HOME_DIR_KEY);
oozieUsername = System.getProperty("user.name");
- oozieGroupname = configuration.getString(ConfigVars.OOZIE_GROUPNAME_KEY);
- oozieYarnResourceManagerAddress = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
-
- oozieHdfsShareLibDir = configuration.getString(ConfigVars.OOZIE_HDFS_SHARE_LIB_DIR_KEY);
- oozieShareLibCreate = configuration.getBoolean(ConfigVars.OOZIE_SHARE_LIB_CREATE_KEY);
- oozieLocalShareLibCacheDir = configuration.getString(ConfigVars.OOZIE_LOCAL_SHARE_LIB_CACHE_DIR_KEY);
- ooziePurgeLocalShareLibCache = configuration.getBoolean(ConfigVars.OOZIE_PURGE_LOCAL_SHARE_LIB_CACHE_KEY);
-
- numNodeManagers = Integer.parseInt(configuration.getString(ConfigVars.YARN_NUM_NODE_MANAGERS_KEY));
- jobHistoryAddress = configuration.getString(ConfigVars.MR_JOB_HISTORY_ADDRESS_KEY);
- resourceManagerAddress = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
- resourceManagerHostname = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_HOSTNAME_KEY);
- resourceManagerSchedulerAddress = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY);
- resourceManagerResourceTrackerAddress = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY);
- resourceManagerWebappAddress = configuration.getString(ConfigVars.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY);
- useInJvmContainerExecutor = configuration.getBoolean(ConfigVars.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY);
+ oozieGroupname = configuration.getString(Config.OOZIE_GROUPNAME_KEY);
+ oozieYarnResourceManagerAddress = configuration.getString(Config.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
+
+ oozieHdfsShareLibDir = configuration.getString(Config.OOZIE_HDFS_SHARE_LIB_DIR_KEY);
+ oozieShareLibCreate = configuration.getBoolean(Config.OOZIE_SHARE_LIB_CREATE_KEY);
+ oozieLocalShareLibCacheDir = configuration.getString(Config.OOZIE_LOCAL_SHARE_LIB_CACHE_DIR_KEY);
+ ooziePurgeLocalShareLibCache = configuration.getBoolean(Config.OOZIE_PURGE_LOCAL_SHARE_LIB_CACHE_KEY);
+
+ numNodeManagers = Integer.parseInt(configuration.getString(Config.YARN_NUM_NODE_MANAGERS_KEY));
+ jobHistoryAddress = configuration.getString(Config.MR_JOB_HISTORY_ADDRESS_KEY);
+ resourceManagerAddress = configuration.getString(Config.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
+ resourceManagerHostname = configuration.getString(Config.YARN_RESOURCE_MANAGER_HOSTNAME_KEY);
+ resourceManagerSchedulerAddress = configuration.getString(Config.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY);
+ resourceManagerResourceTrackerAddress = configuration.getString(Config.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY);
+ resourceManagerWebappAddress = configuration.getString(Config.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY);
+ useInJvmContainerExecutor = configuration.getBoolean(Config.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY);
ooziePort = configuration.getInt(OOZIE_PORT);
oozieHost = configuration.getString(OOZIE_HOST);
diff --git a/hadoop-bootstrap-solrcloud/src/main/java/fr/jetoile/sample/component/SolrCloudBootstrap.java b/hadoop-bootstrap-solrcloud/src/main/java/fr/jetoile/sample/component/SolrCloudBootstrap.java
index cf38616d..08490398 100644
--- a/hadoop-bootstrap-solrcloud/src/main/java/fr/jetoile/sample/component/SolrCloudBootstrap.java
+++ b/hadoop-bootstrap-solrcloud/src/main/java/fr/jetoile/sample/component/SolrCloudBootstrap.java
@@ -23,8 +23,8 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -102,7 +102,7 @@ private void loadConfig() throws BootstrapException {
solrDirectory = configuration.getString(SOLR_DIR_KEY);
solrCollectionName = configuration.getString(SOLR_COLLECTION_NAME);
solrPort = configuration.getInt(SOLR_PORT);
- zkHostString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
}
diff --git a/hadoop-bootstrap-solrcloud/src/test/java/fr/jetoile/sample/component/SolrCloudBootstrapTest.java b/hadoop-bootstrap-solrcloud/src/test/java/fr/jetoile/sample/component/SolrCloudBootstrapTest.java
index 99635930..7e8e00ea 100644
--- a/hadoop-bootstrap-solrcloud/src/test/java/fr/jetoile/sample/component/SolrCloudBootstrapTest.java
+++ b/hadoop-bootstrap-solrcloud/src/test/java/fr/jetoile/sample/component/SolrCloudBootstrapTest.java
@@ -52,7 +52,7 @@ public void solrCloudShouldStart() throws IOException, SolrServerException, Keep
String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
-// String zkHostString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+// String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
// CloudSolrClient client = new CloudSolrClient(zkHostString);
CloudSolrClient client = ((SolrCloudBootstrap) HadoopBootstrap.INSTANCE.getService(Component.SOLRCLOUD)).getClient();
diff --git a/hadoop-bootstrap-standalone/pom.xml b/hadoop-bootstrap-standalone/pom.xml
index 798754bf..4a9d54e6 100644
--- a/hadoop-bootstrap-standalone/pom.xml
+++ b/hadoop-bootstrap-standalone/pom.xml
@@ -12,6 +12,7 @@
     <artifactId>hadoop-bootstrap-standalone</artifactId>
 
+
     <dependencies>
         <dependency>
             <groupId>fr.jetoile.sample</groupId>
             <artifactId>hadoop-bootstrap-commons</artifactId>
@@ -62,97 +63,37 @@
             <artifactId>commons-configuration</artifactId>
         </dependency>
 
-        <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-            <exclusions>
-                <exclusion>
-                    <artifactId>servlet-api</artifactId>
-                    <groupId>javax.servlet</groupId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-core_2.10</artifactId>
-            <version>1.6.0</version>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <artifactId>hadoop-client</artifactId>
-                    <groupId>org.apache.hadoop</groupId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-sql_2.10</artifactId>
-            <version>1.6.0</version>
+        <dependency>
+            <groupId>fr.jetoile.sample</groupId>
+            <artifactId>hadoop-bootstrap-client-spark</artifactId>
             <scope>test</scope>
         </dependency>
 
         <dependency>
-            <groupId>org.apache.spark</groupId>
-            <artifactId>spark-hive_2.10</artifactId>
-            <version>1.6.0</version>
+            <groupId>fr.jetoile.sample</groupId>
+            <artifactId>hadoop-bootstrap-client-solrcloud</artifactId>
             <scope>test</scope>
         </dependency>
 
         <dependency>
             <groupId>fr.jetoile.sample</groupId>
-            <artifactId>hadoop-bootstrap-test-utils</artifactId>
+            <artifactId>hadoop-bootstrap-client-hdfs</artifactId>
             <scope>test</scope>
         </dependency>
 
         <dependency>
-            <groupId>com.lucidworks.spark</groupId>
-            <artifactId>spark-solr</artifactId>
-            <version>1.1.2</version>
+            <groupId>fr.jetoile.sample</groupId>
+            <artifactId>hadoop-bootstrap-client-hive</artifactId>
             <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.ow2.asm</groupId>
-                    <artifactId>asm</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.ow2.asm</groupId>
-                    <artifactId>asm-commons</artifactId>
-                </exclusion>
-                <exclusion>
-                    <artifactId>spark-mllib_2.10</artifactId>
-                    <groupId>org.apache.spark</groupId>
-                </exclusion>
-                <exclusion>
-                    <artifactId>spark-streaming-twitter_2.10</artifactId>
-                    <groupId>org.apache.spark</groupId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.scala-lang</groupId>
-                    <artifactId>scala-library</artifactId>
-                </exclusion>
-                <exclusion>
-                    <artifactId>hadoop-hdfs</artifactId>
-                    <groupId>org.apache.hadoop</groupId>
-                </exclusion>
-                <exclusion>
-                    <artifactId>hadoop-client</artifactId>
-                    <groupId>org.apache.hadoop</groupId>
-                </exclusion>
-                <exclusion>
-                    <artifactId>solr-core</artifactId>
-                    <groupId>org.apache.solr</groupId>
-                </exclusion>
-            </exclusions>
         </dependency>
-
-        <dependency>
-            <groupId>org.apache.derby</groupId>
-            <artifactId>derbyclient</artifactId>
-            <version>10.10.2.0</version>
-            <scope>test</scope>
-        </dependency>
+
+
+
+
+
+
diff --git a/hadoop-bootstrap-standalone/src/main/conf/hadoop.properties b/hadoop-bootstrap-standalone/src/main/conf/hadoop.properties
index b6de7ece..a569ccca 100755
--- a/hadoop-bootstrap-standalone/src/main/conf/hadoop.properties
+++ b/hadoop-bootstrap-standalone/src/main/conf/hadoop.properties
@@ -1,10 +1,10 @@
zookeeper=true
hdfs=true
-#hivemeta=true
-#hiveserver2=true
+hivemeta=true
+hiveserver2=true
#kafka=true
#hbase=true
-#solrcloud=true
-solr=true
+solrcloud=true
+#solr=true
#oozie=true
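
The flags above switch the standalone distribution over to the Hive metastore, HiveServer2 and SolrCloud. The same selection can be made programmatically, as the integration tests below do — a sketch assuming the Component enum names mirror these property keys:

    import fr.jetoile.sample.Component;
    import fr.jetoile.sample.HadoopBootstrap;

    public class StartSketch {
        public static void main(String[] args) throws Exception {
            // start() returns the bootstrap, so components chain in dependency order.
            HadoopBootstrap.INSTANCE
                    .start(Component.ZOOKEEPER)
                    .start(Component.HDFS)
                    .start(Component.HIVESERVER2)
                    .start(Component.SOLRCLOUD);
        }
    }
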
diff --git a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/IntegrationBootstrapTest.java b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/IntegrationBootstrapTest.java
index 105a5e71..ff7f2182 100644
--- a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/IntegrationBootstrapTest.java
+++ b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/IntegrationBootstrapTest.java
@@ -1,8 +1,9 @@
package fr.jetoile.sample.integrationtest;
-import com.github.sakserv.minicluster.config.ConfigVars;
+import fr.jetoile.hadoop.test.hdfs.HdfsUtils;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.Utils;
import fr.jetoile.sample.component.HdfsBootstrap;
@@ -79,7 +80,7 @@ public void solrCloudShouldStart() throws IOException, SolrServerException, Keep
String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
- String zkHostString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
CloudSolrClient client = new CloudSolrClient(zkHostString);
for (int i = 0; i < 1000; ++i) {
@@ -107,31 +108,32 @@ public void kafkaShouldStart() throws Exception {
// Producer
KafkaTestProducer kafkaTestProducer = new KafkaTestProducer.Builder()
- .setKafkaHostname(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY))
- .setKafkaPort(configuration.getInt(ConfigVars.KAFKA_PORT_KEY))
- .setTopic(configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY))
- .setMessageCount(configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY))
+ .setKafkaHostname(configuration.getString(Config.KAFKA_HOSTNAME_KEY))
+ .setKafkaPort(configuration.getInt(Config.KAFKA_PORT_KEY))
+ .setTopic(configuration.getString(Config.KAFKA_TEST_TOPIC_KEY))
+ .setMessageCount(configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY))
.build();
kafkaTestProducer.produceMessages();
// Consumer
 List<String> seeds = new ArrayList<String>();
- seeds.add(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY));
+ seeds.add(configuration.getString(Config.KAFKA_HOSTNAME_KEY));
KafkaTestConsumer kafkaTestConsumer = new KafkaTestConsumer();
kafkaTestConsumer.consumeMessages2(
- configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
- configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY),
+ configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ configuration.getString(Config.KAFKA_TEST_TOPIC_KEY),
0,
seeds,
- configuration.getInt(ConfigVars.KAFKA_PORT_KEY));
+ configuration.getInt(Config.KAFKA_PORT_KEY));
// Assert num of messages produced = num of message consumed
- Assert.assertEquals(configuration.getLong(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ Assert.assertEquals(configuration.getLong(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
kafkaTestConsumer.getNumRead());
}
@Test
+ @Ignore
public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundException, SQLException {
// assertThat(Utils.available("127.0.0.1", 20103)).isFalse();
@@ -145,9 +147,9 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
//
// Get the connection
Connection con = DriverManager.getConnection("jdbc:hive2://" +
- configuration.getString(ConfigVars.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
- configuration.getInt(ConfigVars.HIVE_SERVER2_PORT_KEY) + "/" +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY),
+ configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
+ configuration.getInt(Config.HIVE_SERVER2_PORT_KEY) + "/" +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY),
"user",
"pass");
@@ -155,7 +157,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
Statement stmt;
try {
String createDbDdl = "CREATE DATABASE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY);
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Create Database Statement: {}", createDbDdl);
stmt.execute(createDbDdl);
@@ -164,16 +166,16 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
}
 // Drop the table in case it still exists
- String dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ String dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
// Create the ORC table
String createDdl = "CREATE TABLE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
"PARTITIONED BY (dt STRING) " +
"CLUSTERED BY (id) INTO 16 BUCKETS " +
"STORED AS ORC tblproperties(\"orc.compress\"=\"NONE\")";
@@ -184,7 +186,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
// Issue a describe on the new table and display the output
LOGGER.info("HIVE: Validating Table was Created: ");
ResultSet resultSet = stmt.executeQuery("DESCRIBE FORMATTED " +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
int count = 0;
while (resultSet.next()) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
@@ -197,8 +199,8 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
assertEquals(33, count);
// Drop the table
- dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
@@ -208,28 +210,30 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
@Test
public void hdfsShouldStart() throws Exception {
- assertThat(Utils.available("127.0.0.1", configuration.getInt(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY))).isFalse();
+ assertThat(Utils.available("127.0.0.1", configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY))).isFalse();
- org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
- conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
-
- URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+// org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
+// conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+//
+// URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+//
+// FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
+ FileSystem hdfsFsHandle = HdfsUtils.INSTANCE.getFileSystem();
- FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
- FSDataOutputStream writer = hdfsFsHandle.create(new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- writer.writeUTF(configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ FSDataOutputStream writer = hdfsFsHandle.create(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
writer.close();
// Read the file and compare to test string
- FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- assertEquals(reader.readUTF(), configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
reader.close();
hdfsFsHandle.close();
URL url = new URL(
String.format( "http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
- configuration.getInt( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
+ configuration.getInt( Config.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
URLConnection connection = url.openConnection();
connection.setRequestProperty( "Accept-Charset", "UTF-8" );
BufferedReader response = new BufferedReader( new InputStreamReader( connection.getInputStream() ) );
@@ -243,16 +247,16 @@ public void hdfsShouldStart() throws Exception {
@Test
public void hBaseShouldStart() throws Exception {
- String tableName = configuration.getString(ConfigVars.HBASE_TEST_TABLE_NAME_KEY);
- String colFamName = configuration.getString(ConfigVars.HBASE_TEST_COL_FAMILY_NAME_KEY);
- String colQualiferName = configuration.getString(ConfigVars.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
- Integer numRowsToPut = configuration.getInt(ConfigVars.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
+ String tableName = configuration.getString(Config.HBASE_TEST_TABLE_NAME_KEY);
+ String colFamName = configuration.getString(Config.HBASE_TEST_COL_FAMILY_NAME_KEY);
+ String colQualiferName = configuration.getString(Config.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
+ Integer numRowsToPut = configuration.getInt(Config.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
org.apache.hadoop.conf.Configuration hbaseConfiguration = HBaseConfiguration.create();
- hbaseConfiguration.set("hbase.zookeeper.quorum", configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY));
- hbaseConfiguration.setInt("hbase.zookeeper.property.clientPort", configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY));
- hbaseConfiguration.set("hbase.master", "127.0.0.1:" + configuration.getInt(ConfigVars.HBASE_MASTER_PORT_KEY));
- hbaseConfiguration.set("zookeeper.znode.parent", configuration.getString(ConfigVars.HBASE_ZNODE_PARENT_KEY));
+ hbaseConfiguration.set("hbase.zookeeper.quorum", configuration.getString(Config.ZOOKEEPER_HOST_KEY));
+ hbaseConfiguration.setInt("hbase.zookeeper.property.clientPort", configuration.getInt(Config.ZOOKEEPER_PORT_KEY));
+ hbaseConfiguration.set("hbase.master", "127.0.0.1:" + configuration.getInt(Config.HBASE_MASTER_PORT_KEY));
+ hbaseConfiguration.set("zookeeper.znode.parent", configuration.getString(Config.HBASE_ZNODE_PARENT_KEY));
LOGGER.info("HBASE: Creating table {} with column family {}", tableName, colFamName);
@@ -276,9 +280,9 @@ public void oozieShouldStart() throws Exception {
LOGGER.info("OOZIE: Test Submit Workflow Start");
org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
- conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+ conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
- URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+ URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
FileSystem hdfsFs = FileSystem.get (uri, conf);
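
The recurring change in this test file is the access path to HDFS: HdfsUtils owns the FileSystem, so tests no longer hand-build an org.apache.hadoop.conf.Configuration. A self-contained sketch of the round trip the test performs above:

    import fr.jetoile.hadoop.test.hdfs.HdfsUtils;
    import fr.jetoile.sample.Config;
    import java.io.IOException;
    import org.apache.commons.configuration.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HdfsRoundTripSketch {
        static String roundTrip(Configuration configuration) throws IOException {
            // The singleton already points at the embedded namenode.
            FileSystem fs = HdfsUtils.INSTANCE.getFileSystem();
            Path testFile = new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY));
            try (FSDataOutputStream writer = fs.create(testFile)) {
                writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
            }
            try (FSDataInputStream reader = fs.open(testFile)) {
                return reader.readUTF();
            }
        }
    }
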
diff --git a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/ManualIntegrationBootstrapTest.java b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/ManualIntegrationBootstrapTest.java
index d9de16e5..3bcd99a6 100644
--- a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/ManualIntegrationBootstrapTest.java
+++ b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/ManualIntegrationBootstrapTest.java
@@ -1,7 +1,8 @@
package fr.jetoile.sample.integrationtest;
-import com.github.sakserv.minicluster.config.ConfigVars;
+import fr.jetoile.hadoop.test.hdfs.HdfsUtils;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.Utils;
import fr.jetoile.sample.component.OozieBootstrap;
import fr.jetoile.sample.component.SolrCloudBootstrap;
@@ -74,7 +75,7 @@ public void solrCloudShouldStart() throws IOException, SolrServerException, Keep
String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
- String zkHostString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
CloudSolrClient client = new CloudSolrClient(zkHostString);
for (int i = 0; i < 1000; ++i) {
@@ -102,27 +103,27 @@ public void kafkaShouldStart() throws Exception {
// Producer
KafkaTestProducer kafkaTestProducer = new KafkaTestProducer.Builder()
- .setKafkaHostname(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY))
- .setKafkaPort(configuration.getInt(ConfigVars.KAFKA_PORT_KEY))
- .setTopic(configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY))
- .setMessageCount(configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY))
+ .setKafkaHostname(configuration.getString(Config.KAFKA_HOSTNAME_KEY))
+ .setKafkaPort(configuration.getInt(Config.KAFKA_PORT_KEY))
+ .setTopic(configuration.getString(Config.KAFKA_TEST_TOPIC_KEY))
+ .setMessageCount(configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY))
.build();
kafkaTestProducer.produceMessages();
// Consumer
 List<String> seeds = new ArrayList<String>();
- seeds.add(configuration.getString(ConfigVars.KAFKA_HOSTNAME_KEY));
+ seeds.add(configuration.getString(Config.KAFKA_HOSTNAME_KEY));
KafkaTestConsumer kafkaTestConsumer = new KafkaTestConsumer();
kafkaTestConsumer.consumeMessages2(
- configuration.getInt(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
- configuration.getString(ConfigVars.KAFKA_TEST_TOPIC_KEY),
+ configuration.getInt(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ configuration.getString(Config.KAFKA_TEST_TOPIC_KEY),
0,
seeds,
- configuration.getInt(ConfigVars.KAFKA_PORT_KEY));
+ configuration.getInt(Config.KAFKA_PORT_KEY));
// Assert num of messages produced = num of message consumed
- Assert.assertEquals(configuration.getLong(ConfigVars.KAFKA_TEST_MESSAGE_COUNT_KEY),
+ Assert.assertEquals(configuration.getLong(Config.KAFKA_TEST_MESSAGE_COUNT_KEY),
kafkaTestConsumer.getNumRead());
}
@@ -140,9 +141,9 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
//
// Get the connection
Connection con = DriverManager.getConnection("jdbc:hive2://" +
- configuration.getString(ConfigVars.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
- configuration.getInt(ConfigVars.HIVE_SERVER2_PORT_KEY) + "/" +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY),
+ configuration.getString(Config.HIVE_SERVER2_HOSTNAME_KEY) + ":" +
+ configuration.getInt(Config.HIVE_SERVER2_PORT_KEY) + "/" +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY),
"user",
"pass");
@@ -150,7 +151,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
Statement stmt;
try {
String createDbDdl = "CREATE DATABASE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY);
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Create Database Statement: {}", createDbDdl);
stmt.execute(createDbDdl);
@@ -159,16 +160,16 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
}
 // Drop the table in case it still exists
- String dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ String dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
// Create the ORC table
String createDdl = "CREATE TABLE IF NOT EXISTS " +
- configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
+ configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY) + " (id INT, msg STRING) " +
"PARTITIONED BY (dt STRING) " +
"CLUSTERED BY (id) INTO 16 BUCKETS " +
"STORED AS ORC tblproperties(\"orc.compress\"=\"NONE\")";
@@ -179,7 +180,7 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
// Issue a describe on the new table and display the output
LOGGER.info("HIVE: Validating Table was Created: ");
ResultSet resultSet = stmt.executeQuery("DESCRIBE FORMATTED " +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY));
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY));
int count = 0;
while (resultSet.next()) {
ResultSetMetaData resultSetMetaData = resultSet.getMetaData();
@@ -192,8 +193,8 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
assertEquals(33, count);
// Drop the table
- dropDdl = "DROP TABLE " + configuration.getString(ConfigVars.HIVE_TEST_DATABASE_NAME_KEY) + "." +
- configuration.getString(ConfigVars.HIVE_TEST_TABLE_NAME_KEY);
+ dropDdl = "DROP TABLE " + configuration.getString(Config.HIVE_TEST_DATABASE_NAME_KEY) + "." +
+ configuration.getString(Config.HIVE_TEST_TABLE_NAME_KEY);
stmt = con.createStatement();
LOGGER.info("HIVE: Running Drop Table Statement: {}", dropDdl);
stmt.execute(dropDdl);
@@ -204,28 +205,30 @@ public void hiveServer2ShouldStart() throws InterruptedException, ClassNotFoundE
@Test
public void hdfsShouldStart() throws Exception {
- assertThat(Utils.available("127.0.0.1", configuration.getInt(ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY))).isFalse();
+// assertThat(Utils.available("127.0.0.1", configuration.getInt(Config.HDFS_NAMENODE_HTTP_PORT_KEY))).isFalse();
+//
+// org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
+// conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+//
+// URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
+//
+// FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
+ FileSystem hdfsFsHandle = HdfsUtils.INSTANCE.getFileSystem();
- org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
- conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
-
- URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
-
- FileSystem hdfsFsHandle = FileSystem.get (uri, conf);
- FSDataOutputStream writer = hdfsFsHandle.create(new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- writer.writeUTF(configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ FSDataOutputStream writer = hdfsFsHandle.create(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ writer.writeUTF(configuration.getString(Config.HDFS_TEST_STRING_KEY));
writer.close();
// Read the file and compare to test string
- FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(ConfigVars.HDFS_TEST_FILE_KEY)));
- assertEquals(reader.readUTF(), configuration.getString(ConfigVars.HDFS_TEST_STRING_KEY));
+ FSDataInputStream reader = hdfsFsHandle.open(new Path(configuration.getString(Config.HDFS_TEST_FILE_KEY)));
+ assertEquals(reader.readUTF(), configuration.getString(Config.HDFS_TEST_STRING_KEY));
reader.close();
hdfsFsHandle.close();
URL url = new URL(
String.format( "http://localhost:%s/webhdfs/v1?op=GETHOMEDIRECTORY&user.name=guest",
- configuration.getInt( ConfigVars.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
+ configuration.getInt( Config.HDFS_NAMENODE_HTTP_PORT_KEY ) ) );
URLConnection connection = url.openConnection();
connection.setRequestProperty( "Accept-Charset", "UTF-8" );
BufferedReader response = new BufferedReader( new InputStreamReader( connection.getInputStream() ) );
@@ -238,16 +241,16 @@ public void hdfsShouldStart() throws Exception {
@Test
public void hBaseShouldStart() throws Exception {
- String tableName = configuration.getString(ConfigVars.HBASE_TEST_TABLE_NAME_KEY);
- String colFamName = configuration.getString(ConfigVars.HBASE_TEST_COL_FAMILY_NAME_KEY);
- String colQualiferName = configuration.getString(ConfigVars.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
- Integer numRowsToPut = configuration.getInt(ConfigVars.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
+ String tableName = configuration.getString(Config.HBASE_TEST_TABLE_NAME_KEY);
+ String colFamName = configuration.getString(Config.HBASE_TEST_COL_FAMILY_NAME_KEY);
+ String colQualiferName = configuration.getString(Config.HBASE_TEST_COL_QUALIFIER_NAME_KEY);
+ Integer numRowsToPut = configuration.getInt(Config.HBASE_TEST_NUM_ROWS_TO_PUT_KEY);
org.apache.hadoop.conf.Configuration hbaseConfiguration = HBaseConfiguration.create();
- hbaseConfiguration.set("hbase.zookeeper.quorum", configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY));
- hbaseConfiguration.setInt("hbase.zookeeper.property.clientPort", configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY));
- hbaseConfiguration.set("hbase.master", "127.0.0.1:" + configuration.getInt(ConfigVars.HBASE_MASTER_PORT_KEY));
- hbaseConfiguration.set("zookeeper.znode.parent", configuration.getString(ConfigVars.HBASE_ZNODE_PARENT_KEY));
+ hbaseConfiguration.set("hbase.zookeeper.quorum", configuration.getString(Config.ZOOKEEPER_HOST_KEY));
+ hbaseConfiguration.setInt("hbase.zookeeper.property.clientPort", configuration.getInt(Config.ZOOKEEPER_PORT_KEY));
+ hbaseConfiguration.set("hbase.master", "127.0.0.1:" + configuration.getInt(Config.HBASE_MASTER_PORT_KEY));
+ hbaseConfiguration.set("zookeeper.znode.parent", configuration.getString(Config.HBASE_ZNODE_PARENT_KEY));
LOGGER.info("HBASE: Creating table {} with column family {}", tableName, colFamName);
@@ -273,9 +276,9 @@ public void oozieShouldStart() throws Exception {
LOGGER.info("OOZIE: Test Submit Workflow Start");
org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
- conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+ conf.set("fs.default.name", "hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
- URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY));
+ URI uri = URI.create ("hdfs://127.0.0.1:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY));
FileSystem hdfsFs = FileSystem.get (uri, conf);
diff --git a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkIntegrationTest.java b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkIntegrationTest.java
index bba1e40a..edd8e1d2 100644
--- a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkIntegrationTest.java
+++ b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkIntegrationTest.java
@@ -23,13 +23,13 @@
*/
package fr.jetoile.sample.integrationtest;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.ninja_squad.dbsetup.Operations;
import com.ninja_squad.dbsetup.operation.Operation;
import fr.jetoile.hadoop.test.hdfs.HdfsUtils;
import fr.jetoile.hadoop.test.hive.HiveConnectionUtils;
import fr.jetoile.hadoop.test.hive.HiveSetup;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
import fr.jetoile.sample.exception.NotFoundServiceException;
@@ -58,6 +58,7 @@
import static com.ninja_squad.dbsetup.Operations.sql;
import static org.fest.assertions.Assertions.assertThat;
+@Ignore
public class SparkIntegrationTest {
static private Logger LOGGER = LoggerFactory.getLogger(SparkIntegrationTest.class);
@@ -76,7 +77,6 @@ public static void setUp() throws BootstrapException, SQLException, ClassNotFoun
throw new BootstrapException("bad config", e);
}
-// HadoopBootstrap.INSTANCE.startAll();
HadoopBootstrap.INSTANCE
.start(Component.ZOOKEEPER)
.start(Component.HDFS)
@@ -87,11 +87,11 @@ public static void setUp() throws BootstrapException, SQLException, ClassNotFoun
sequenceOf(sql("CREATE EXTERNAL TABLE IF NOT EXISTS default.test(id INT, value STRING) " +
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ';'" +
" STORED AS TEXTFILE" +
- " LOCATION 'hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test'"),
+ " LOCATION 'hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test'"),
sql("CREATE EXTERNAL TABLE IF NOT EXISTS default.test_parquet(id INT, value STRING) " +
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ';'" +
" STORED AS PARQUET" +
- " LOCATION 'hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet'"));
+ " LOCATION 'hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet'"));
DROP_TABLES =
sequenceOf(sql("DROP TABLE IF EXISTS default.test"),
@@ -179,10 +179,10 @@ public void spark_should_create_a_parquet_file() throws SQLException, IOExceptio
HiveContext sqlContext = new HiveContext(context.sc());
DataFrame sql = sqlContext.sql("SELECT * FROM default.test");
- sql.write().parquet("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
+ sql.write().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
- assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
+ assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
context.close();
}
@@ -201,10 +201,10 @@ public void spark_should_read_parquet_file() throws IOException {
HiveContext hiveContext = new HiveContext(context.sc());
DataFrame sql = hiveContext.sql("SELECT * FROM default.test");
- sql.write().parquet("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
+ sql.write().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
- assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
+ assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
context.close();
@@ -212,7 +212,7 @@ public void spark_should_read_parquet_file() throws IOException {
context = new JavaSparkContext(conf);
SQLContext sqlContext = new SQLContext(context);
- DataFrame file = sqlContext.read().parquet("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
+ DataFrame file = sqlContext.read().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
DataFrame select = file.select("id", "value");
Row[] rows = select.collect();
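
Both Spark tests follow the same shape: write a Hive query result to parquet on the embedded HDFS, then read it back through a plain SQLContext. A sketch of that round trip with the namenode port resolved through the project's Config keys (Spark 1.6 API, as used throughout):

    import fr.jetoile.sample.Config;
    import org.apache.commons.configuration.Configuration;
    import org.apache.spark.api.java.JavaSparkContext;
    import org.apache.spark.sql.DataFrame;
    import org.apache.spark.sql.SQLContext;
    import org.apache.spark.sql.hive.HiveContext;

    public class ParquetRoundTripSketch {
        static DataFrame roundTrip(JavaSparkContext context, Configuration configuration) {
            String target = "hdfs://localhost:"
                    + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY)
                    + "/khanh/test_parquet/file.parquet";
            // Write through Hive...
            DataFrame sql = new HiveContext(context.sc()).sql("SELECT * FROM default.test");
            sql.write().parquet(target);
            // ...and read back with a plain SQLContext.
            return new SQLContext(context).read().parquet(target);
        }
    }
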
diff --git a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkSolrIntegrationTest.java b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkSolrIntegrationTest.java
index dd40b154..c0ef126f 100644
--- a/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkSolrIntegrationTest.java
+++ b/hadoop-bootstrap-standalone/src/test/java/fr/jetoile/sample/integrationtest/SparkSolrIntegrationTest.java
@@ -23,8 +23,6 @@
*/
package fr.jetoile.sample.integrationtest;
-import com.github.sakserv.minicluster.config.ConfigVars;
-//import com.lucidworks.spark.SolrSupport;
import com.lucidworks.spark.SolrSupport;
import com.ninja_squad.dbsetup.Operations;
import com.ninja_squad.dbsetup.operation.Operation;
@@ -32,6 +30,7 @@
import fr.jetoile.hadoop.test.hive.HiveConnectionUtils;
import fr.jetoile.hadoop.test.hive.HiveSetup;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopBootstrap;
import fr.jetoile.sample.component.SolrCloudBootstrap;
import fr.jetoile.sample.exception.BootstrapException;
@@ -64,6 +63,8 @@
import static junit.framework.TestCase.assertNotNull;
import static org.fest.assertions.Assertions.assertThat;
+
+@Ignore
public class SparkSolrIntegrationTest {
static private Logger LOGGER = LoggerFactory.getLogger(SparkSolrIntegrationTest.class);
@@ -82,7 +83,6 @@ public static void setUp() throws BootstrapException, SQLException, ClassNotFoun
throw new BootstrapException("bad config", e);
}
-// HadoopBootstrap.INSTANCE.startAll();
HadoopBootstrap.INSTANCE
.start(Component.ZOOKEEPER)
.start(Component.HDFS)
@@ -94,11 +94,11 @@ public static void setUp() throws BootstrapException, SQLException, ClassNotFoun
sequenceOf(sql("CREATE EXTERNAL TABLE IF NOT EXISTS default.test(id INT, value STRING) " +
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ';'" +
" STORED AS TEXTFILE" +
- " LOCATION 'hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test'"),
+ " LOCATION 'hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test'"),
sql("CREATE EXTERNAL TABLE IF NOT EXISTS default.test_parquet(id INT, value STRING) " +
" ROW FORMAT DELIMITED FIELDS TERMINATED BY ';'" +
" STORED AS PARQUET" +
- " LOCATION 'hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet'"));
+ " LOCATION 'hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet'"));
DROP_TABLES =
sequenceOf(sql("DROP TABLE IF EXISTS default.test"),
@@ -114,7 +114,6 @@ public static void tearDown() throws NotFoundServiceException {
.stop(Component.HIVESERVER2)
.stop(Component.HDFS)
.stop(Component.ZOOKEEPER);
-// HadoopBootstrap.INSTANCE.stopAll();
}
@Before
@@ -152,10 +151,10 @@ public void spark_should_read_parquet_file_and_index_into_solr() throws IOExcept
HiveContext hiveContext = new HiveContext(context.sc());
DataFrame sql = hiveContext.sql("SELECT * FROM default.test");
- sql.write().parquet("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
+ sql.write().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
FileSystem fileSystem = HdfsUtils.INSTANCE.getFileSystem();
- assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
+ assertThat(fileSystem.exists(new Path("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet"))).isTrue();
context.close();
@@ -163,7 +162,7 @@ public void spark_should_read_parquet_file_and_index_into_solr() throws IOExcept
context = new JavaSparkContext(conf);
SQLContext sqlContext = new SQLContext(context);
- DataFrame file = sqlContext.read().parquet("hdfs://localhost:" + configuration.getInt(ConfigVars.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
+ DataFrame file = sqlContext.read().parquet("hdfs://localhost:" + configuration.getInt(Config.HDFS_NAMENODE_PORT_KEY) + "/khanh/test_parquet/file.parquet");
DataFrame select = file.select("id", "value");
 JavaRDD<SolrInputDocument> solrInputDocument = select.toJavaRDD().map(r -> {
@@ -174,7 +173,7 @@ public void spark_should_read_parquet_file_and_index_into_solr() throws IOExcept
});
String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
- String zkHostString = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
+ String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY) + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
SolrSupport.indexDocs(zkHostString, collectionName, 1000, solrInputDocument);
//then
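
The handoff above is the spark-solr piece: the RDD of SolrInputDocument built from the DataFrame rows is indexed into the embedded collection in batches of 1000. Pulled out as a sketch:

    import com.lucidworks.spark.SolrSupport;
    import fr.jetoile.sample.Config;
    import fr.jetoile.sample.component.SolrCloudBootstrap;
    import org.apache.commons.configuration.Configuration;
    import org.apache.solr.common.SolrInputDocument;
    import org.apache.spark.api.java.JavaRDD;

    public class IndexSketch {
        static void index(Configuration configuration, JavaRDD<SolrInputDocument> docs) {
            // Zookeeper ensemble and target collection, resolved as in the test.
            String zkHostString = configuration.getString(Config.ZOOKEEPER_HOST_KEY)
                    + ":" + configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
            String collectionName = configuration.getString(SolrCloudBootstrap.SOLR_COLLECTION_NAME);
            SolrSupport.indexDocs(zkHostString, collectionName, 1000, docs);
        }
    }
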
diff --git a/hadoop-bootstrap-zookeeper/src/main/java/fr/jetoile/sample/component/ZookeeperBootstrap.java b/hadoop-bootstrap-zookeeper/src/main/java/fr/jetoile/sample/component/ZookeeperBootstrap.java
index 8256d27a..c104f947 100644
--- a/hadoop-bootstrap-zookeeper/src/main/java/fr/jetoile/sample/component/ZookeeperBootstrap.java
+++ b/hadoop-bootstrap-zookeeper/src/main/java/fr/jetoile/sample/component/ZookeeperBootstrap.java
@@ -23,9 +23,9 @@
*/
package fr.jetoile.sample.component;
-import com.github.sakserv.minicluster.config.ConfigVars;
import com.github.sakserv.minicluster.impl.ZookeeperLocalCluster;
import fr.jetoile.sample.Component;
+import fr.jetoile.sample.Config;
import fr.jetoile.sample.HadoopUtils;
import fr.jetoile.sample.exception.BootstrapException;
import org.apache.commons.configuration.Configuration;
@@ -77,9 +77,9 @@ private void loadConfig() throws BootstrapException {
} catch (ConfigurationException e) {
throw new BootstrapException("bad config", e);
}
- port = configuration.getInt(ConfigVars.ZOOKEEPER_PORT_KEY);
- localDir = configuration.getString(ConfigVars.ZOOKEEPER_TEMP_DIR_KEY);
- host = configuration.getString(ConfigVars.ZOOKEEPER_HOST_KEY);
+ port = configuration.getInt(Config.ZOOKEEPER_PORT_KEY);
+ localDir = configuration.getString(Config.ZOOKEEPER_TEMP_DIR_KEY);
+ host = configuration.getString(Config.ZOOKEEPER_HOST_KEY);
}
diff --git a/pom.xml b/pom.xml
index 314e80c6..9056299e 100755
--- a/pom.xml
+++ b/pom.xml
@@ -23,6 +23,7 @@
         <module>hadoop-bootstrap-hbase</module>
         <module>hadoop-bootstrap-standalone</module>
         <module>hadoop-bootstrap-commons</module>
+        <module>hadoop-bootstrap-client</module>
@@ -60,16 +61,50 @@
         <hadoop-bootstrap.version>1.1-SNAPSHOT</hadoop-bootstrap.version>
+        <spark.version>1.6.0</spark.version>
     </properties>
 
     <dependencyManagement>
         <dependencies>
+            <dependency>
+                <groupId>com.ninja-squad</groupId>
+                <artifactId>DbSetup</artifactId>
+                <version>${dbSetup.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.hive</groupId>
+                <artifactId>hive-jdbc</artifactId>
+                <version>${hive.version}</version>
+            </dependency>
+
             <dependency>
                 <groupId>fr.jetoile.sample</groupId>
-                <artifactId>hadoop-bootstrap-test-utils</artifactId>
-                <version>1.0-SNAPSHOT</version>
+                <artifactId>hadoop-bootstrap-client-solrcloud</artifactId>
+                <version>${hadoop-bootstrap.version}</version>
+                <scope>test</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>fr.jetoile.sample</groupId>
+                <artifactId>hadoop-bootstrap-client-spark</artifactId>
+                <version>${hadoop-bootstrap.version}</version>
+                <scope>test</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>fr.jetoile.sample</groupId>
+                <artifactId>hadoop-bootstrap-client-hdfs</artifactId>
+                <version>${hadoop-bootstrap.version}</version>
+                <scope>test</scope>
+            </dependency>
+
+            <dependency>
+                <groupId>fr.jetoile.sample</groupId>
+                <artifactId>hadoop-bootstrap-client-hive</artifactId>
+                <version>${hadoop-bootstrap.version}</version>
                 <scope>test</scope>