From 6deaf35c0d2c084c078e567c8fabd3ed59c07b0e Mon Sep 17 00:00:00 2001
From: Frank Austin Nothaft
Date: Thu, 30 Jan 2014 22:58:48 -0800
Subject: [PATCH] Changing hadoop fs call to be compatible with Hadoop 1.

---
 .../scala/edu/berkeley/cs/amplab/adam/rdd/AdamContext.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/rdd/AdamContext.scala b/adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/rdd/AdamContext.scala
index 4e2abb3f7c..75d46f8c06 100644
--- a/adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/rdd/AdamContext.scala
+++ b/adam-core/src/main/scala/edu/berkeley/cs/amplab/adam/rdd/AdamContext.scala
@@ -325,7 +325,7 @@ class AdamContext(sc: SparkContext) extends Serializable with Logging {
     } else {
       val statuses = FileSystem.get(sc.hadoopConfiguration).listStatus(path)
       val r = Pattern.compile(regex)
-      val (matches, recurse) = statuses.filter(s => s.isDirectory).map(s => s.getPath).partition(p => r.matcher(p.getName).matches())
+      val (matches, recurse) = statuses.filter(s => s.isDir).map(s => s.getPath).partition(p => r.matcher(p.getName).matches())
       matches.toSeq ++ recurse.flatMap(p => findFiles(p, regex))
     }
   }
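
For context, a minimal standalone sketch of the recursive directory lookup this hunk touches, assuming a plain Hadoop `Configuration` is passed in place of `sc.hadoopConfiguration` and that the enclosing method has roughly this signature; it is not lifted verbatim from the full `AdamContext` source. The relevant point is that `FileStatus.isDir` is the directory check present in Hadoop 1, while `isDirectory` is only available in later Hadoop releases.

```scala
import java.util.regex.Pattern

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object FindFilesSketch {

  // Recursively collects directories under `path` whose names match `regex`.
  // `isDir` is used instead of `isDirectory` because only the former exists
  // in Hadoop 1, which is what the patch above restores compatibility with.
  def findFiles(conf: Configuration, path: Path, regex: String): Seq[Path] = {
    val statuses = FileSystem.get(conf).listStatus(path)
    val r = Pattern.compile(regex)
    val (matches, recurse) = statuses
      .filter(s => s.isDir)   // Hadoop 1-compatible directory check
      .map(s => s.getPath)
      .partition(p => r.matcher(p.getName).matches())
    // Keep directories whose names match; descend into the rest.
    matches.toSeq ++ recurse.flatMap(p => findFiles(conf, p, regex))
  }
}
```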