diff --git a/hadoop-unit-commons/src/main/java/fr/jetoile/hadoopunit/Component.java b/hadoop-unit-commons/src/main/java/fr/jetoile/hadoopunit/Component.java
index d6456733..f1e3b8aa 100644
--- a/hadoop-unit-commons/src/main/java/fr/jetoile/hadoopunit/Component.java
+++ b/hadoop-unit-commons/src/main/java/fr/jetoile/hadoopunit/Component.java
@@ -20,6 +20,7 @@
public enum Component {
HDFS("hdfs", "fr.jetoile.hadoopunit.component.HdfsBootstrap", "hdfs.artifact"),
ZOOKEEPER("zookeeper", "fr.jetoile.hadoopunit.component.ZookeeperBootstrap", "zookeeper.artifact"),
+ YARN("yarn", "fr.jetoile.hadoopunit.component.YarnBootstrap", "yarn.artifact"),
ALLUXIO("alluxio", "fr.jetoile.hadoopunit.component.AlluxioBootstrap", "alluxio.artifact"),
HIVEMETA("hivemeta", "fr.jetoile.hadoopunit.component.HiveMetastoreBootstrap", "hivemeta.artifact"),
HIVESERVER2("hiveserver2", "fr.jetoile.hadoopunit.component.HiveServer2Bootstrap", "hiveserver2.artifact"),
diff --git a/hadoop-unit-standalone/pom.xml b/hadoop-unit-standalone/pom.xml
index 31be2ef8..1e8c9f8e 100644
--- a/hadoop-unit-standalone/pom.xml
+++ b/hadoop-unit-standalone/pom.xml
@@ -175,6 +175,10 @@
hadoop-auth
org.apache.hadoop
+            <exclusion>
+                <artifactId>hadoop-yarn-api</artifactId>
+                <groupId>org.apache.hadoop</groupId>
+            </exclusion>
@@ -206,6 +210,13 @@
test
+
+        <dependency>
+            <groupId>org.apache.hadoop</groupId>
+            <artifactId>hadoop-yarn-api</artifactId>
+            <version>${hadoop.version}</version>
+            <scope>test</scope>
+        </dependency>
diff --git a/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties
index 2d0e35d2..bb5c7d15 100644
--- a/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties
+++ b/hadoop-unit-standalone/src/main/resources/hadoop-unit-default.properties
@@ -16,6 +16,7 @@ elasticsearch.artifact=fr.jetoile.hadoop:hadoop-unit-elasticsearch:2.3-SNAPSHOT
neo4j.artifact=fr.jetoile.hadoop:hadoop-unit-neo4j:2.3-SNAPSHOT
knox.artifact=fr.jetoile.hadoop:hadoop-unit-knox:2.3-SNAPSHOT
redis.artifact=fr.jetoile.hadoop:hadoop-unit-redis:2.3-SNAPSHOT
+yarn.artifact=fr.jetoile.hadoop:hadoop-unit-yarn:2.3-SNAPSHOT
maven.central.repo=https://repo.maven.apache.org/maven2/
maven.local.repo=/home/khanh/.m2/repository
diff --git a/hadoop-unit-standalone/src/main/resources/hadoop.properties b/hadoop-unit-standalone/src/main/resources/hadoop.properties
index 7bc1dbf3..3d4c38f0 100755
--- a/hadoop-unit-standalone/src/main/resources/hadoop.properties
+++ b/hadoop-unit-standalone/src/main/resources/hadoop.properties
@@ -1,6 +1,6 @@
zookeeper=true
hdfs=true
-alluxio=true
+#alluxio=true
hivemeta=true
hiveserver2=true
#kafka=true
@@ -12,5 +12,6 @@ hiveserver2=true
#elasticsearch=true
#neo4j=true
#knox=true
-redis=true
+#redis=true
+yarn=true
diff --git a/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/ManualIntegrationBootstrapTest.java b/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/ManualIntegrationBootstrapTest.java
index c5940349..f40ae95d 100644
--- a/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/ManualIntegrationBootstrapTest.java
+++ b/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/ManualIntegrationBootstrapTest.java
@@ -23,9 +23,12 @@
import com.datastax.driver.core.Session;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.mongodb.*;
+import fr.jetoile.hadoopunit.Component;
+import fr.jetoile.hadoopunit.HadoopBootstrap;
import fr.jetoile.hadoopunit.HadoopUnitConfig;
import fr.jetoile.hadoopunit.exception.BootstrapException;
import fr.jetoile.hadoopunit.exception.NotFoundServiceException;
+import fr.jetoile.hadoopunit.integrationtest.simpleyarnapp.Client;
import fr.jetoile.hadoopunit.test.alluxio.AlluxioUtils;
import fr.jetoile.hadoopunit.test.hdfs.HdfsUtils;
import fr.jetoile.hadoopunit.test.kafka.KafkaConsumerUtils;
@@ -70,6 +73,9 @@
import java.net.*;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
@@ -787,5 +793,28 @@ public void neo4jShouldStartWithRealDriver() {
assertEquals("Arthur", results.get(0).get("name").asString());
}
+ @Test
+ public void testYarnLocalClusterIntegrationTest() {
+
+ String[] args = new String[7];
+ args[0] = "whoami";
+ args[1] = "1";
+ args[2] = getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString();
+ args[3] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
+ args[4] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY);
+ args[5] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY);
+ args[6] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY);
+
+
+        try {
+            Client.main(args);
+        } catch (Exception e) {
+            org.junit.Assert.fail("simple-yarn-app client failed: " + e); // was swallowed: test could never fail
+        }
+
+
+ }
+
+
}
diff --git a/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/simpleyarnapp/Client.java b/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/simpleyarnapp/Client.java
new file mode 100644
index 00000000..869fdbb8
--- /dev/null
+++ b/hadoop-unit-standalone/src/test/java/fr/jetoile/hadoopunit/integrationtest/simpleyarnapp/Client.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package fr.jetoile.hadoopunit.integrationtest.simpleyarnapp;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.records.*;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class Client {
+
+ Configuration conf = new YarnConfiguration();
+
+ public void run(String[] args) throws Exception {
+ final String command = args[0];
+ final int n = Integer.valueOf(args[1]);
+ final Path jarPath = new Path(args[2]);
+ final String resourceManagerAddress = args[3];
+ final String resourceManagerHostname = args[4];
+ final String resourceManagerSchedulerAddress = args[5];
+ final String resourceManagerResourceTrackerAddress = args[6];
+
+ // Create yarnClient
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set("yarn.resourcemanager.address", resourceManagerAddress);
+ conf.set("yarn.resourcemanager.hostname", resourceManagerHostname);
+ conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress);
+ conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress);
+ YarnClient yarnClient = YarnClient.createYarnClient();
+ yarnClient.init(conf);
+ yarnClient.start();
+
+ // Create application via yarnClient
+ YarnClientApplication app = yarnClient.createApplication();
+
+ // Set up the container launch context for the application master
+ ContainerLaunchContext amContainer =
+ Records.newRecord(ContainerLaunchContext.class);
+ amContainer.setCommands(
+ Collections.singletonList(
+ "$JAVA_HOME/bin/java" +
+ " -Xmx256M" +
+ " com.hortonworks.simpleyarnapp.ApplicationMaster" +
+ " " + command +
+ " " + String.valueOf(n) +
+ " " + resourceManagerAddress +
+ " " + resourceManagerHostname +
+ " " + resourceManagerSchedulerAddress +
+ " " + resourceManagerResourceTrackerAddress +
+ " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"
+ )
+ );
+
+ // Setup jar for ApplicationMaster
+ LocalResource appMasterJar = Records.newRecord(LocalResource.class);
+ setupAppMasterJar(jarPath, appMasterJar);
+ amContainer.setLocalResources(
+ Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar));
+
+        // Setup CLASSPATH for ApplicationMaster (typed map: Apps.addToEnvironment takes Map<String,String>)
+        Map<String, String> appMasterEnv = new HashMap<>();
+        setupAppMasterEnv(appMasterEnv);
+        amContainer.setEnvironment(appMasterEnv);
+
+        // Set up resource type requirements for ApplicationMaster
+        Resource capability = Records.newRecord(Resource.class);
+        capability.setMemory(256); capability.setVirtualCores(1); // memory was missing; matches hadoop-unit-yarn Client
+
+ // Finally, set-up ApplicationSubmissionContext for the application
+ ApplicationSubmissionContext appContext =
+ app.getApplicationSubmissionContext();
+ appContext.setApplicationName("simple-yarn-app"); // application name
+ appContext.setAMContainerSpec(amContainer);
+ appContext.setResource(capability);
+ appContext.setQueue("default"); // queue
+
+ // Submit application
+ ApplicationId appId = appContext.getApplicationId();
+ System.out.println("Submitting application " + appId);
+ yarnClient.submitApplication(appContext);
+
+ ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+ YarnApplicationState appState = appReport.getYarnApplicationState();
+ while (appState != YarnApplicationState.FINISHED &&
+ appState != YarnApplicationState.KILLED &&
+ appState != YarnApplicationState.FAILED) {
+ Thread.sleep(100);
+ appReport = yarnClient.getApplicationReport(appId);
+ appState = appReport.getYarnApplicationState();
+ }
+
+ System.out.println(
+ "Application " + appId + " finished with" +
+ " state " + appState +
+ " at " + appReport.getFinishTime());
+
+ }
+
+ private void setupAppMasterJar(Path jarPath, LocalResource appMasterJar) throws IOException {
+ FileStatus jarStat = FileSystem.get(conf).getFileStatus(jarPath);
+ appMasterJar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
+ appMasterJar.setSize(jarStat.getLen());
+ appMasterJar.setTimestamp(jarStat.getModificationTime());
+ appMasterJar.setType(LocalResourceType.FILE);
+ appMasterJar.setVisibility(LocalResourceVisibility.PUBLIC);
+ }
+
+ @SuppressWarnings("deprecation")
+ private void setupAppMasterEnv(Map appMasterEnv) {
+ for (String c : conf.getStrings(
+ YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+ YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+ Apps.addToEnvironment(appMasterEnv, Environment.CLASSPATH.name(),
+ c.trim());
+ }
+ Apps.addToEnvironment(appMasterEnv,
+ Environment.CLASSPATH.name(),
+ Environment.PWD.$() + File.separator + "*");
+ }
+
+ public static void main(String[] args) throws Exception {
+ Client c = new Client();
+ c.run(args);
+ }
+
+}
diff --git a/hadoop-unit-standalone/src/test/resources/simple-yarn-app-1.1.0.jar b/hadoop-unit-standalone/src/test/resources/simple-yarn-app-1.1.0.jar
new file mode 100644
index 00000000..5ed41759
Binary files /dev/null and b/hadoop-unit-standalone/src/test/resources/simple-yarn-app-1.1.0.jar differ
diff --git a/hadoop-unit-yarn/pom.xml b/hadoop-unit-yarn/pom.xml
new file mode 100644
index 00000000..98675022
--- /dev/null
+++ b/hadoop-unit-yarn/pom.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>hadoop-unit</artifactId>
+        <groupId>fr.jetoile.hadoop</groupId>
+        <version>2.3-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hadoop-unit-yarn</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>fr.jetoile.hadoop</groupId>
+            <artifactId>hadoop-unit-commons-hadoop</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.github.sakserv</groupId>
+            <artifactId>hadoop-mini-clusters-yarn</artifactId>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java
new file mode 100644
index 00000000..5c562dca
--- /dev/null
+++ b/hadoop-unit-yarn/src/main/java/fr/jetoile/hadoopunit/component/YarnBootstrap.java
@@ -0,0 +1,168 @@
+package fr.jetoile.hadoopunit.component;
+
+import com.github.sakserv.minicluster.impl.YarnLocalCluster;
+import com.github.sakserv.minicluster.util.FileUtils;
+import com.github.sakserv.minicluster.yarn.InJvmContainerExecutor;
+import fr.jetoile.hadoopunit.Component;
+import fr.jetoile.hadoopunit.HadoopUnitConfig;
+import fr.jetoile.hadoopunit.exception.BootstrapException;
+import fr.jetoile.hadoopunit.exception.NotFoundServiceException;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.util.Map;
+
+public class YarnBootstrap implements BootstrapHadoop {
+ final public static String NAME = Component.YARN.name();
+
+ final private Logger LOGGER = LoggerFactory.getLogger(YarnBootstrap.class);
+
+ private State state = State.STOPPED;
+
+ private Configuration configuration;
+
+ private YarnLocalCluster yarnLocalCluster = null;
+
+ private int yarnNumNodeManagers;
+ private int yarnNumLocalDirs;
+ private int yarnNumLogDirs;
+ private String yarnRMAddress;
+ private String yarnRMHostname;
+ private String yarnRMSchedulerAddress;
+ private String yarnRMResourceTrackerAddress;
+ private String yarnRMWebappAddress;
+ private boolean inJvmContainer;
+
+ public YarnBootstrap() {
+ if (yarnLocalCluster == null) {
+ try {
+ loadConfig();
+ } catch (BootstrapException | NotFoundServiceException e) {
+ LOGGER.error("unable to load configuration", e);
+ }
+ }
+ }
+
+ private void init() {
+
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+
+    @Override
+    public String getProperties() {
+        // One-line status string for logging; each field now reports its own
+        // address (the original pasted the scheduler address into all three slots).
+        return "[" +
+                "RM address:" + yarnRMAddress +
+                ", RM Scheduler address:" + yarnRMSchedulerAddress +
+                ", RM Resource Tracker address:" + yarnRMResourceTrackerAddress +
+                ", RM Webapp address:" + yarnRMWebappAddress +
+                "]";
+    }
+
+ private void build() throws NotFoundServiceException {
+ yarnLocalCluster = new YarnLocalCluster.Builder()
+ .setNumNodeManagers(yarnNumNodeManagers)
+ .setNumLocalDirs(yarnNumLocalDirs)
+ .setNumLogDirs(yarnNumLogDirs)
+ .setResourceManagerAddress(yarnRMAddress)
+ .setResourceManagerHostname(yarnRMHostname)
+ .setResourceManagerSchedulerAddress(yarnRMSchedulerAddress)
+ .setResourceManagerResourceTrackerAddress(yarnRMResourceTrackerAddress)
+ .setResourceManagerWebappAddress(yarnRMWebappAddress)
+ .setUseInJvmContainerExecutor(inJvmContainer)
+ .setConfig(new org.apache.hadoop.conf.Configuration())
+ .build();
+
+ }
+
+ private void loadConfig() throws BootstrapException, NotFoundServiceException {
+
+ try {
+ configuration = new PropertiesConfiguration(HadoopUnitConfig.DEFAULT_PROPS_FILE);
+ } catch (ConfigurationException e) {
+ throw new BootstrapException("bad config", e);
+ }
+
+ yarnNumNodeManagers = configuration.getInt(HadoopUnitConfig.YARN_NUM_NODE_MANAGERS_KEY);
+ yarnNumLocalDirs = configuration.getInt(HadoopUnitConfig.YARN_NUM_LOCAL_DIRS_KEY);
+ yarnNumLogDirs = configuration.getInt(HadoopUnitConfig.YARN_NUM_LOG_DIRS_KEY);
+ yarnRMAddress = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
+ yarnRMHostname = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY);
+ yarnRMSchedulerAddress = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY);
+ yarnRMResourceTrackerAddress = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY);
+ yarnRMWebappAddress = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_WEBAPP_ADDRESS_KEY);
+ inJvmContainer = configuration.getBoolean(HadoopUnitConfig.YARN_USE_IN_JVM_CONTAINER_EXECUTOR_KEY);
+ }
+
+ @Override
+ public void loadConfig(Map configs) {
+
+ }
+
+ @Override
+ public Bootstrap start() {
+ if (state == State.STOPPED) {
+ state = State.STARTING;
+ LOGGER.info("{} is starting", this.getClass().getName());
+ init();
+ try {
+ build();
+ } catch (NotFoundServiceException e) {
+ LOGGER.error("unable to add yarn", e);
+ }
+ try {
+ yarnLocalCluster.start();
+ } catch (Exception e) {
+ LOGGER.error("unable to add yarn", e);
+ }
+ state = State.STARTED;
+ LOGGER.info("{} is started", this.getClass().getName());
+ }
+
+ return this;
+ }
+
+ @Override
+ public Bootstrap stop() {
+ if (state == State.STARTED) {
+ state = State.STOPPING;
+ LOGGER.info("{} is stopping", this.getClass().getName());
+ try {
+ System.setSecurityManager(new InJvmContainerExecutor.SystemExitAllowSecurityManager());
+ yarnLocalCluster.stop();
+ cleanup();
+ } catch (Exception e) {
+ LOGGER.error("unable to stop yarn", e);
+ }
+ state = State.STOPPED;
+ LOGGER.info("{} is stopped", this.getClass().getName());
+ }
+ return this;
+ }
+
+ private void cleanup() {
+ if (new File("./target/classes").exists()) {
+ FileUtils.deleteFolder("./target/" + getTestName());
+ } else {
+ FileUtils.deleteFolder("./target");
+ }
+ }
+
+ @Override
+ public org.apache.hadoop.conf.Configuration getConfiguration() {
+ return yarnLocalCluster.getConfig();
+ }
+
+ public String getTestName() {
+ return yarnLocalCluster.getTestName();
+ }
+
+}
diff --git a/hadoop-unit-yarn/src/main/resources/META-INF/services/fr.jetoile.hadoopunit.component.Bootstrap b/hadoop-unit-yarn/src/main/resources/META-INF/services/fr.jetoile.hadoopunit.component.Bootstrap
new file mode 100644
index 00000000..e0d54cb3
--- /dev/null
+++ b/hadoop-unit-yarn/src/main/resources/META-INF/services/fr.jetoile.hadoopunit.component.Bootstrap
@@ -0,0 +1 @@
+fr.jetoile.hadoopunit.component.YarnBootstrap
\ No newline at end of file
diff --git a/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties b/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties
new file mode 100644
index 00000000..bfe01ecd
--- /dev/null
+++ b/hadoop-unit-yarn/src/main/resources/hadoop-unit-default.properties
@@ -0,0 +1,175 @@
+#HADOOP_HOME=/opt/hadoop
+
+# Zookeeper
+zookeeper.temp.dir=/tmp/embedded_zk
+zookeeper.host=127.0.0.1
+zookeeper.port=22010
+
+# Hive
+hive.scratch.dir=/tmp/hive_scratch_dir
+hive.warehouse.dir=/tmp/warehouse_dir
+
+# Hive Metastore
+hive.metastore.hostname=localhost
+hive.metastore.port=20102
+hive.metastore.derby.db.dir=metastore_db
+
+# Hive Server2
+hive.server2.hostname=localhost
+hive.server2.port=20103
+
+# Hive Test
+hive.test.database.name=default
+hive.test.table.name=test_table
+
+
+# HDFS
+hdfs.namenode.host=localhost
+hdfs.namenode.port=20112
+hdfs.namenode.http.port=50070
+hdfs.temp.dir=/tmp/embedded_hdfs
+hdfs.num.datanodes=1
+hdfs.enable.permissions=false
+hdfs.format=true
+hdfs.enable.running.user.as.proxy.user=true
+
+# HDFS Test
+hdfs.test.file=/tmp/testing
+hdfs.test.string=TESTING
+
+
+# HBase
+hbase.master.port=25111
+hbase.master.info.port=-1
+hbase.num.region.servers=1
+hbase.root.dir=/tmp/embedded_hbase
+hbase.znode.parent=/hbase-unsecure
+hbase.wal.replication.enabled=false
+
+# HBase REST
+hbase.rest.port=28000
+hbase.rest.readonly=false
+hbase.rest.info.port=28080
+hbase.rest.host=0.0.0.0
+hbase.rest.threads.max=100
+hbase.rest.threads.min=2
+
+# HBase Test
+hbase.test.table.name=hbase_test_table
+hbase.test.col.family.name=cf1
+hbase.test.col.qualifier.name=cq1
+hbase.test.num.rows.to.put=50
+
+# Kafka
+kafka.hostname=127.0.0.1
+kafka.port=20111
+
+# Kafka Test
+kafka.test.topic=testtopic
+kafka.test.message.count=10
+kafka.test.broker.id=1
+kafka.test.temp.dir=embedded_kafka
+
+#SolR + SolRCloud
+solr.dir=solr
+
+#SolR
+solr.collection.internal.name=collection1_shard1_replica1
+
+#SolRCloud
+solr.collection.name=collection1
+solr.cloud.port=8983
+
+
+
+
+
+# YARN
+yarn.num.node.managers=1
+yarn.num.local.dirs=1
+yarn.num.log.dirs=1
+yarn.resource.manager.address=localhost:37001
+yarn.resource.manager.hostname=localhost
+yarn.resource.manager.scheduler.address=localhost:37002
+yarn.resource.manager.resource.tracker.address=localhost:37003
+yarn.resource.manager.webapp.address=localhost:37004
+yarn.use.in.jvm.container.executor=false
+
+# MR
+mr.job.history.address=localhost:37005
+
+# Oozie
+oozie.tmp.dir=/tmp/oozie_tmp
+oozie.test.dir=/tmp/embedded_oozie
+oozie.home.dir=/tmp/oozie_home
+oozie.username=blah
+oozie.groupname=testgroup
+oozie.hdfs.share.lib.dir=/tmp/share_lib
+oozie.share.lib.create=true
+oozie.local.share.lib.cache.dir=/tmp/share_lib_cache
+oozie.purge.local.share.lib.cache=false
+oozie.sharelib.path=/home/khanh/github
+oozie.sharelib.name=oozie-4.2.0.2.6.1.0-129-distro.tar.gz
+oozie.port=20113
+oozie.host=localhost
+oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING
+#oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK
+
+# ElasticSearch
+elasticsearch.version=5.4.3
+elasticsearch.ip=127.0.0.1
+elasticsearch.http.port=14433
+elasticsearch.tcp.port=14533
+elasticsearch.index.name=test_index
+elasticsearch.cluster.name=elasticsearch
+#elasticsearch.download.url=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.4.3.zip
+
+# MongoDB
+mongo.ip=127.0.0.1
+mongo.port=13333
+mongo.database.name=test_database
+mongo.collection.name=test_collection
+
+# Cassandra
+cassandra.ip=127.0.0.1
+cassandra.port=13433
+cassandra.temp.dir=/tmp/embedded_cassandra
+
+# Neo4j
+neo4j.ip=127.0.0.1
+neo4j.port=13533
+neo4j.temp.dir=/tmp/embedded_neo4j
+
+# KNOX
+knox.host=localhost
+knox.port=8888
+knox.path=gateway
+knox.cluster=mycluster
+knox.home.dir=/tmp/embedded_knox
+knox.service=namenode,webhdfs,webhbase
+
+# Alluxio
+#alluxio.work.dir=/tmp/alluxio
+alluxio.work.dir=hdfs://localhost:20112/alluxio
+alluxio.hostname=localhost
+alluxio.master.port=14001
+alluxio.master.web.port=14002
+alluxio.proxy.web.port=14100
+alluxio.worker.web.port=14003
+alluxio.worker.data.port=14004
+alluxio.worker.port=14005
+alluxio.webapp.directory=conf/alluxio/webapp
+
+
+# Redis
+redis.port=6379
+redis.download.url=http://download.redis.io/releases/
+redis.version=4.0.0
+redis.cleanup.installation=false
+redis.temp.dir=/tmp/redis
+redis.type=SERVER
+#redis.type=CLUSTER
+#redis.type=MASTER_SLAVE
+#redis.type=SENTINEL
+#redis.slave.ports=6380
+#redis.sentinel.ports=36479,36480,36481,36482,36483
diff --git a/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java
new file mode 100644
index 00000000..a7ce621d
--- /dev/null
+++ b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/YarnBootstrapTest.java
@@ -0,0 +1,105 @@
+package fr.jetoile.hadoopunit.component;
+
+import fr.jetoile.hadoopunit.Component;
+import fr.jetoile.hadoopunit.HadoopBootstrap;
+import fr.jetoile.hadoopunit.HadoopUnitConfig;
+import fr.jetoile.hadoopunit.component.simpleyarnapp.Client;
+import fr.jetoile.hadoopunit.exception.BootstrapException;
+import fr.jetoile.hadoopunit.exception.NotFoundServiceException;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.configuration.PropertiesConfiguration;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+import static org.junit.Assert.*;
+
+public class YarnBootstrapTest {
+
+ private static Configuration configuration;
+
+ @BeforeClass
+ public static void setup() throws BootstrapException {
+ HadoopBootstrap.INSTANCE.startAll();
+
+ try {
+ configuration = new PropertiesConfiguration(HadoopUnitConfig.DEFAULT_PROPS_FILE);
+ } catch (ConfigurationException e) {
+ throw new BootstrapException("bad config", e);
+ }
+ }
+
+ @AfterClass
+ public static void tearDown() {
+ HadoopBootstrap.INSTANCE.stopAll();
+ }
+
+ @Test
+ public void testYarnLocalClusterIntegrationTest() {
+
+ String[] args = new String[7];
+ args[0] = "whoami";
+ args[1] = "1";
+ args[2] = getClass().getClassLoader().getResource("simple-yarn-app-1.1.0.jar").toString();
+ args[3] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_ADDRESS_KEY);
+ args[4] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_HOSTNAME_KEY);
+ args[5] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_SCHEDULER_ADDRESS_KEY);
+ args[6] = configuration.getString(HadoopUnitConfig.YARN_RESOURCE_MANAGER_RESOURCE_TRACKER_ADDRESS_KEY);
+
+
+        try {
+            Client.main(args);
+        } catch (Exception e) {
+            fail("simple-yarn-app client failed: " + e); // fail fast instead of swallowing the submission error
+        }
+
+ // simple yarn app running "whoami",
+ // validate the container contents matches the java user.name
+ assertEquals(System.getProperty("user.name"), getStdoutContents());
+
+ }
+
+ public String getStdoutContents() {
+ String contents = "";
+ try {
+ byte[] encoded = Files.readAllBytes(Paths.get(getStdoutPath()));
+ contents = new String(encoded, Charset.defaultCharset()).trim();
+ } catch (IOException e) {
+ e.printStackTrace();
+ } catch (NotFoundServiceException e) {
+ e.printStackTrace();
+ }
+ return contents;
+ }
+
+ public String getStdoutPath() throws NotFoundServiceException {
+ BootstrapHadoop yarn = (BootstrapHadoop) HadoopBootstrap.INSTANCE.getService(Component.YARN);
+ String yarnName = ((YarnBootstrap) yarn).getTestName();
+
+        File dir = new File("./target/" + yarnName);
+        String[] nmDirs = dir.list(); if (nmDirs == null) { return ""; } // File.list() returns null when dir is absent
+ for (String nmDir: nmDirs) {
+ if (nmDir.contains("logDir")) {
+ String[] appDirs = new File(dir.toString() + "/" + nmDir).list();
+ for (String appDir: appDirs) {
+ if (appDir.contains("0001")) {
+ String[] containerDirs = new File(dir.toString() + "/" + nmDir + "/" + appDir).list();
+ for (String containerDir: containerDirs) {
+ if(containerDir.contains("000002")) {
+ return dir.toString() + "/" + nmDir + "/" + appDir + "/" + containerDir + "/stdout";
+ }
+ }
+ }
+ }
+ }
+ }
+ return "";
+ }
+}
\ No newline at end of file
diff --git a/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/simpleyarnapp/Client.java b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/simpleyarnapp/Client.java
new file mode 100644
index 00000000..16870e74
--- /dev/null
+++ b/hadoop-unit-yarn/src/test/java/fr/jetoile/hadoopunit/component/simpleyarnapp/Client.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package fr.jetoile.hadoopunit.component.simpleyarnapp;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.records.*;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+
+public class Client {
+
+ Configuration conf = new YarnConfiguration();
+
+ public void run(String[] args) throws Exception {
+ final String command = args[0];
+ final int n = Integer.valueOf(args[1]);
+ final Path jarPath = new Path(args[2]);
+ final String resourceManagerAddress = args[3];
+ final String resourceManagerHostname = args[4];
+ final String resourceManagerSchedulerAddress = args[5];
+ final String resourceManagerResourceTrackerAddress = args[6];
+
+ // Create yarnClient
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.set("yarn.resourcemanager.address", resourceManagerAddress);
+ conf.set("yarn.resourcemanager.hostname", resourceManagerHostname);
+ conf.set("yarn.resourcemanager.scheduler.address", resourceManagerSchedulerAddress);
+ conf.set("yarn.resourcemanager.resource-tracker.address", resourceManagerResourceTrackerAddress);
+ YarnClient yarnClient = YarnClient.createYarnClient();
+ yarnClient.init(conf);
+ yarnClient.start();
+
+ // Create application via yarnClient
+ YarnClientApplication app = yarnClient.createApplication();
+
+ // Set up the container launch context for the application master
+ ContainerLaunchContext amContainer =
+ Records.newRecord(ContainerLaunchContext.class);
+ amContainer.setCommands(
+ Collections.singletonList(
+ "$JAVA_HOME/bin/java" +
+ " -Xmx256M" +
+ " com.hortonworks.simpleyarnapp.ApplicationMaster" +
+ " " + command +
+ " " + String.valueOf(n) +
+ " " + resourceManagerAddress +
+ " " + resourceManagerHostname +
+ " " + resourceManagerSchedulerAddress +
+ " " + resourceManagerResourceTrackerAddress +
+ " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"
+ )
+ );
+
+ // Setup jar for ApplicationMaster
+ LocalResource appMasterJar = Records.newRecord(LocalResource.class);
+ setupAppMasterJar(jarPath, appMasterJar);
+ amContainer.setLocalResources(
+ Collections.singletonMap("simple-yarn-app-1.1.0.jar", appMasterJar));
+
+        // Setup CLASSPATH for ApplicationMaster (typed map: Apps.addToEnvironment takes Map<String,String>)
+        Map<String, String> appMasterEnv = new HashMap<>();
+        setupAppMasterEnv(appMasterEnv);
+        amContainer.setEnvironment(appMasterEnv);
+
+ // Set up resource type requirements for ApplicationMaster
+ Resource capability = Records.newRecord(Resource.class);
+ capability.setMemory(256);
+ capability.setVirtualCores(1);
+
+ // Finally, set-up ApplicationSubmissionContext for the application
+ ApplicationSubmissionContext appContext =
+ app.getApplicationSubmissionContext();
+ appContext.setApplicationName("simple-yarn-app"); // application name
+ appContext.setAMContainerSpec(amContainer);
+ appContext.setResource(capability);
+ appContext.setQueue("default"); // queue
+
+ // Submit application
+ ApplicationId appId = appContext.getApplicationId();
+ System.out.println("Submitting application " + appId);
+ yarnClient.submitApplication(appContext);
+
+ ApplicationReport appReport = yarnClient.getApplicationReport(appId);
+ YarnApplicationState appState = appReport.getYarnApplicationState();
+ while (appState != YarnApplicationState.FINISHED &&
+ appState != YarnApplicationState.KILLED &&
+ appState != YarnApplicationState.FAILED) {
+ Thread.sleep(100);
+ appReport = yarnClient.getApplicationReport(appId);
+ appState = appReport.getYarnApplicationState();
+ }
+
+ System.out.println(
+ "Application " + appId + " finished with" +
+ " state " + appState +
+ " at " + appReport.getFinishTime());
+
+ }
+
+ private void setupAppMasterJar(Path jarPath, LocalResource appMasterJar) throws IOException {
+ FileStatus jarStat = FileSystem.get(conf).getFileStatus(jarPath);
+ appMasterJar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
+ appMasterJar.setSize(jarStat.getLen());
+ appMasterJar.setTimestamp(jarStat.getModificationTime());
+ appMasterJar.setType(LocalResourceType.FILE);
+ appMasterJar.setVisibility(LocalResourceVisibility.PUBLIC);
+ }
+
+ @SuppressWarnings("deprecation")
+ private void setupAppMasterEnv(Map appMasterEnv) {
+ for (String c : conf.getStrings(
+ YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+ YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
+ Apps.addToEnvironment(appMasterEnv, Environment.CLASSPATH.name(),
+ c.trim());
+ }
+ Apps.addToEnvironment(appMasterEnv,
+ Environment.CLASSPATH.name(),
+ Environment.PWD.$() + File.separator + "*");
+ }
+
+ public static void main(String[] args) throws Exception {
+ Client c = new Client();
+ c.run(args);
+ }
+
+}
diff --git a/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties b/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties
new file mode 100644
index 00000000..bfe01ecd
--- /dev/null
+++ b/hadoop-unit-yarn/src/test/resources/hadoop-unit-default.properties
@@ -0,0 +1,175 @@
+#HADOOP_HOME=/opt/hadoop
+
+# Zookeeper
+zookeeper.temp.dir=/tmp/embedded_zk
+zookeeper.host=127.0.0.1
+zookeeper.port=22010
+
+# Hive
+hive.scratch.dir=/tmp/hive_scratch_dir
+hive.warehouse.dir=/tmp/warehouse_dir
+
+# Hive Metastore
+hive.metastore.hostname=localhost
+hive.metastore.port=20102
+hive.metastore.derby.db.dir=metastore_db
+
+# Hive Server2
+hive.server2.hostname=localhost
+hive.server2.port=20103
+
+# Hive Test
+hive.test.database.name=default
+hive.test.table.name=test_table
+
+
+# HDFS
+hdfs.namenode.host=localhost
+hdfs.namenode.port=20112
+hdfs.namenode.http.port=50070
+hdfs.temp.dir=/tmp/embedded_hdfs
+hdfs.num.datanodes=1
+hdfs.enable.permissions=false
+hdfs.format=true
+hdfs.enable.running.user.as.proxy.user=true
+
+# HDFS Test
+hdfs.test.file=/tmp/testing
+hdfs.test.string=TESTING
+
+
+# HBase
+hbase.master.port=25111
+hbase.master.info.port=-1
+hbase.num.region.servers=1
+hbase.root.dir=/tmp/embedded_hbase
+hbase.znode.parent=/hbase-unsecure
+hbase.wal.replication.enabled=false
+
+# HBase REST
+hbase.rest.port=28000
+hbase.rest.readonly=false
+hbase.rest.info.port=28080
+hbase.rest.host=0.0.0.0
+hbase.rest.threads.max=100
+hbase.rest.threads.min=2
+
+# HBase Test
+hbase.test.table.name=hbase_test_table
+hbase.test.col.family.name=cf1
+hbase.test.col.qualifier.name=cq1
+hbase.test.num.rows.to.put=50
+
+# Kafka
+kafka.hostname=127.0.0.1
+kafka.port=20111
+
+# Kafka Test
+kafka.test.topic=testtopic
+kafka.test.message.count=10
+kafka.test.broker.id=1
+kafka.test.temp.dir=embedded_kafka
+
+#SolR + SolRCloud
+solr.dir=solr
+
+#SolR
+solr.collection.internal.name=collection1_shard1_replica1
+
+#SolRCloud
+solr.collection.name=collection1
+solr.cloud.port=8983
+
+
+
+
+
+# YARN
+yarn.num.node.managers=1
+yarn.num.local.dirs=1
+yarn.num.log.dirs=1
+yarn.resource.manager.address=localhost:37001
+yarn.resource.manager.hostname=localhost
+yarn.resource.manager.scheduler.address=localhost:37002
+yarn.resource.manager.resource.tracker.address=localhost:37003
+yarn.resource.manager.webapp.address=localhost:37004
+yarn.use.in.jvm.container.executor=false
+
+# MR
+mr.job.history.address=localhost:37005
+
+# Oozie
+oozie.tmp.dir=/tmp/oozie_tmp
+oozie.test.dir=/tmp/embedded_oozie
+oozie.home.dir=/tmp/oozie_home
+oozie.username=blah
+oozie.groupname=testgroup
+oozie.hdfs.share.lib.dir=/tmp/share_lib
+oozie.share.lib.create=true
+oozie.local.share.lib.cache.dir=/tmp/share_lib_cache
+oozie.purge.local.share.lib.cache=false
+oozie.sharelib.path=/home/khanh/github
+oozie.sharelib.name=oozie-4.2.0.2.6.1.0-129-distro.tar.gz
+oozie.port=20113
+oozie.host=localhost
+oozie.sharelib.component=OOZIE,MAPREDUCE_STREAMING
+#oozie.sharelib.component=OOZIE,HCATALOG,DISTCP,MAPREDUCE_STREAMING,PIG,HIVE,HIVE2,SQOOP,SPARK
+
+# ElasticSearch
+elasticsearch.version=5.4.3
+elasticsearch.ip=127.0.0.1
+elasticsearch.http.port=14433
+elasticsearch.tcp.port=14533
+elasticsearch.index.name=test_index
+elasticsearch.cluster.name=elasticsearch
+#elasticsearch.download.url=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.4.3.zip
+
+# MongoDB
+mongo.ip=127.0.0.1
+mongo.port=13333
+mongo.database.name=test_database
+mongo.collection.name=test_collection
+
+# Cassandra
+cassandra.ip=127.0.0.1
+cassandra.port=13433
+cassandra.temp.dir=/tmp/embedded_cassandra
+
+# Neo4j
+neo4j.ip=127.0.0.1
+neo4j.port=13533
+neo4j.temp.dir=/tmp/embedded_neo4j
+
+# KNOX
+knox.host=localhost
+knox.port=8888
+knox.path=gateway
+knox.cluster=mycluster
+knox.home.dir=/tmp/embedded_knox
+knox.service=namenode,webhdfs,webhbase
+
+# Alluxio
+#alluxio.work.dir=/tmp/alluxio
+alluxio.work.dir=hdfs://localhost:20112/alluxio
+alluxio.hostname=localhost
+alluxio.master.port=14001
+alluxio.master.web.port=14002
+alluxio.proxy.web.port=14100
+alluxio.worker.web.port=14003
+alluxio.worker.data.port=14004
+alluxio.worker.port=14005
+alluxio.webapp.directory=conf/alluxio/webapp
+
+
+# Redis
+redis.port=6379
+redis.download.url=http://download.redis.io/releases/
+redis.version=4.0.0
+redis.cleanup.installation=false
+redis.temp.dir=/tmp/redis
+redis.type=SERVER
+#redis.type=CLUSTER
+#redis.type=MASTER_SLAVE
+#redis.type=SENTINEL
+#redis.slave.ports=6380
+#redis.sentinel.ports=36479,36480,36481,36482,36483
diff --git a/hadoop-unit-yarn/src/test/resources/simple-yarn-app-1.1.0.jar b/hadoop-unit-yarn/src/test/resources/simple-yarn-app-1.1.0.jar
new file mode 100644
index 00000000..5ed41759
Binary files /dev/null and b/hadoop-unit-yarn/src/test/resources/simple-yarn-app-1.1.0.jar differ
diff --git a/pom.xml b/pom.xml
index d0d4532a..954fa4da 100755
--- a/pom.xml
+++ b/pom.xml
@@ -54,6 +54,7 @@
hadoop-unit-solrcloud
hadoop-unit-hdfs
hadoop-unit-hbase
+ hadoop-unit-yarn
hadoop-unit-standalone
hadoop-unit-commons
hadoop-unit-commons-hadoop