
Commit

Change to warning
sryza committed Feb 2, 2015
1 parent cc46e52 commit e9ce742
Showing 2 changed files with 6 additions and 7 deletions.
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -311,9 +311,9 @@ class HadoopRDD[K, V](

   override def persist(storageLevel: StorageLevel): this.type = {
     if (storageLevel.deserialized) {
-      throw new SparkException("Can't cache HadoopRDDs as deserialized objects because Hadoop's" +
-        " RecordReader reuses the same Writable object for all records. Use a map transformation" +
-        " to make copies of the records.")
+      logWarning("Caching HadoopRDDs as deserialized objects usually leads to undesired" +
+        " behavior because Hadoop's RecordReader reuses the same Writable object for all records." +
+        " Use a map transformation to make copies of the records.")
     }
     super.persist(storageLevel)
   }
7 changes: 3 additions & 4 deletions core/src/main/scala/org/apache/spark/rdd/NewHadoopRDD.scala
@@ -36,7 +36,6 @@ import org.apache.spark.rdd.NewHadoopRDD.NewHadoopMapPartitionsWithSplitRDD
 import org.apache.spark.util.Utils
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.storage.StorageLevel
-import scala.Some
 
 private[spark] class NewHadoopPartition(
     rddId: Int,
@@ -211,9 +210,9 @@ class NewHadoopRDD[K, V](

   override def persist(storageLevel: StorageLevel): this.type = {
     if (storageLevel.deserialized) {
-      throw new SparkException("Can't cache NewHadoopRDDs as deserialized objects because" +
-        " Hadoop's RecordReader reuses the same Writable object for all records. Use a map" +
-        " transformation to make copies of the records.")
+      logWarning("Caching NewHadoopRDDs as deserialized objects usually leads to undesired" +
+        " behavior because Hadoop's RecordReader reuses the same Writable object for all records." +
+        " Use a map transformation to make copies of the records.")
     }
     super.persist(storageLevel)
   }
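For readers who hit this warning: the remedy the message suggests is a map transformation that copies each record out of the reused Writable instances before the RDD is persisted. A minimal user-side sketch of that pattern follows; the input path, application name, and master URL are placeholder assumptions, not part of this commit.

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.spark.SparkContext
import org.apache.spark.storage.StorageLevel

object CacheHadoopRecordsExample {
  def main(args: Array[String]): Unit = {
    // Placeholder master/app name for a local run.
    val sc = new SparkContext("local[*]", "cache-hadoop-records-example")

    // hadoopFile returns a HadoopRDD; its RecordReader reuses one Writable pair
    // for every record, which is why caching it deserialized is unsafe.
    // The path is a hypothetical placeholder.
    val raw = sc.hadoopFile[LongWritable, Text, TextInputFormat]("hdfs:///path/to/input")

    // Copy each record out of the reused Writables, as the warning advises,
    // then cache the copies rather than the raw HadoopRDD.
    val copied = raw.map { case (offset, line) => (offset.get, line.toString) }
    copied.persist(StorageLevel.MEMORY_ONLY)

    println(copied.count())
    sc.stop()
  }
}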
