Skip to content

Commit

Permalink
SPARK-2434: Warning messages redirecting to original implementations a…
Browse files Browse the repository at this point in the history
…dded.
  • Loading branch information
brkyvz committed Jul 17, 2014
1 parent 9c24974 commit 2cb5301
Show file tree
Hide file tree
Showing 8 changed files with 101 additions and 7 deletions.
16 changes: 15 additions & 1 deletion examples/src/main/scala/org/apache/spark/examples/LocalALS.scala
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,20 @@ object LocalALS {
}
case _ => {
System.err.println("Usage: LocalALS <M> <U> <F> <iters>")
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
System.exit(1)
}
}
printf("Running with M=%d, U=%d, F=%d, iters=%d\n", M, U, F, ITERATIONS)

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
val R = generateR()

// Initialize m and u randomly
Expand All @@ -136,5 +145,10 @@ object LocalALS {
println("RMSE = " + rmse(R, ms, us))
println()
}
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,11 @@ object LocalFileLR {
val lines = scala.io.Source.fromFile(args(0)).getLines().toArray
val points = lines.map(parsePoint _)
val ITERATIONS = args(1).toInt

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
// Initialize w to a random value
var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
println("Initial w: " + w)
Expand All @@ -52,5 +56,10 @@ object LocalFileLR {
}

println("Final w: " + w)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,11 @@ object LocalKMeans {
points.add(data(rand.nextInt(N)))
}

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF K-MEANS CLUSTERING AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE KMeans METHOD FOUND IN org.apache.spark.mllib.clustering FOR
|MORE CONVENTIONAL USE
""".stripMargin)
val iter = points.iterator
for (i <- 1 to points.size) {
kPoints.put(i, iter.next())
Expand Down Expand Up @@ -103,5 +108,10 @@ object LocalKMeans {
}

println("Final centers: " + kPoints)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF K-MEANS CLUSTERING AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE KMeans METHOD FOUND IN org.apache.spark.mllib.clustering FOR
|MORE CONVENTIONAL USE
""".stripMargin)
}
}
11 changes: 10 additions & 1 deletion examples/src/main/scala/org/apache/spark/examples/LocalLR.scala
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,11 @@ object LocalLR {

def main(args: Array[String]) {
val data = generateData

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
// Initialize w to a random value
var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
println("Initial w: " + w)
Expand All @@ -60,5 +64,10 @@ object LocalLR {
}

println("Final w: " + w)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
}
}
16 changes: 15 additions & 1 deletion examples/src/main/scala/org/apache/spark/examples/SparkALS.scala
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,19 @@ object SparkALS {
slices = slices_.getOrElse("2").toInt
case _ =>
System.err.println("Usage: SparkALS [M] [U] [F] [iters] [slices]")
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
System.exit(1)
}
printf("Running with M=%d, U=%d, F=%d, iters=%d\n", M, U, F, ITERATIONS)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
val sparkConf = new SparkConf().setAppName("SparkALS")
val sc = new SparkContext(sparkConf)

Expand All @@ -130,7 +140,11 @@ object SparkALS {
println("RMSE = " + rmse(R, ms, us))
println()
}

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF ALS AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE ALS METHOD FOUND IN org.apache.spark.mllib.recommendation FOR
|MORE CONVENTIONAL USE
""".stripMargin)
sc.stop()
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,18 @@ object SparkHdfsLR {
def main(args: Array[String]) {
if (args.length < 2) {
System.err.println("Usage: SparkHdfsLR <file> <iters>")
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
System.exit(1)
}

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
val sparkConf = new SparkConf().setAppName("SparkHdfsLR")
val inputPath = args(0)
val conf = SparkHadoopUtil.get.newConfiguration()
Expand All @@ -78,6 +87,11 @@ object SparkHdfsLR {
}

println("Final w: " + w)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
sc.stop()
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,19 @@ object SparkKMeans {

def main(args: Array[String]) {
if (args.length < 3) {
System.err.println("Usage: SparkKMeans <file> <k> <convergeDist>")
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF K-MEANS CLUSTERING AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE KMeans METHOD FOUND IN org.apache.spark.mllib.clustering FOR
|MORE CONVENTIONAL USE
""".stripMargin)
System.exit(1)
}
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF K-MEANS CLUSTERING AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE KMeans METHOD FOUND IN org.apache.spark.mllib.clustering FOR
|MORE CONVENTIONAL USE
""".stripMargin)
val sparkConf = new SparkConf().setAppName("SparkKMeans")
val sc = new SparkContext(sparkConf)
val lines = sc.textFile(args(0))
Expand Down Expand Up @@ -82,6 +92,11 @@ object SparkKMeans {

println("Final centers:")
kPoints.foreach(println)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF K-MEANS CLUSTERING AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE KMeans METHOD FOUND IN org.apache.spark.mllib.clustering FOR
|MORE CONVENTIONAL USE
""".stripMargin)
sc.stop()
}
}
11 changes: 10 additions & 1 deletion examples/src/main/scala/org/apache/spark/examples/SparkLR.scala
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,11 @@ object SparkLR {
val sc = new SparkContext(sparkConf)
val numSlices = if (args.length > 0) args(0).toInt else 2
val points = sc.parallelize(generateData, numSlices).cache()

System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
// Initialize w to a random value
var w = DenseVector.fill(D){2 * rand.nextDouble - 1}
println("Initial w: " + w)
Expand All @@ -66,6 +70,11 @@ object SparkLR {
}

println("Final w: " + w)
System.err.println(
"""WARNING: THIS IS A NAIVE IMPLEMENTATION OF LOGISTIC REGRESSION AND IS GIVEN AS AN EXAMPLE!
|PLEASE USE THE LogisticRegression METHOD FOUND IN org.apache.spark.mllib.classification FOR
|MORE CONVENTIONAL USE
""".stripMargin)
sc.stop()
}
}

0 comments on commit 2cb5301

Please sign in to comment.