Merge pull request #1648 from apache/master
GulajavaMinistudio authored May 17, 2024
2 parents 4f47353 + 1ea1561 commit 6e0fc89
Showing 65 changed files with 312 additions and 193 deletions.
@@ -184,7 +184,7 @@ private void transferAllOutstanding() {
logger.error("Exception while beginning {} of {} outstanding blocks (after {} retries)", e,
MDC.of(LogKeys.TRANSFER_TYPE$.MODULE$, listener.getTransferType()),
MDC.of(LogKeys.NUM_BLOCK_IDS$.MODULE$, blockIdsToTransfer.length),
MDC.of(LogKeys.RETRY_COUNT$.MODULE$, numRetries));
MDC.of(LogKeys.NUM_RETRY$.MODULE$, numRetries));
} else {
logger.error("Exception while beginning {} of {} outstanding blocks", e,
MDC.of(LogKeys.TRANSFER_TYPE$.MODULE$, listener.getTransferType()),
@@ -217,7 +217,7 @@ synchronized boolean initiateRetry(Throwable e) {

logger.info("Retrying {} ({}/{}) for {} outstanding blocks after {} ms",
MDC.of(LogKeys.TRANSFER_TYPE$.MODULE$, listener.getTransferType()),
MDC.of(LogKeys.RETRY_COUNT$.MODULE$, retryCount),
MDC.of(LogKeys.NUM_RETRY$.MODULE$, retryCount),
MDC.of(LogKeys.MAX_ATTEMPTS$.MODULE$, maxRetries),
MDC.of(LogKeys.NUM_BLOCK_IDS$.MODULE$, outstandingBlocksIds.size()),
MDC.of(LogKeys.RETRY_WAIT_TIME$.MODULE$, retryWaitTime));
@@ -313,7 +313,7 @@ private void handleBlockTransferFailure(String blockId, Throwable exception) {
logger.error("Failed to {} block {}, and will not retry ({} retries)", exception,
MDC.of(LogKeys.TRANSFER_TYPE$.MODULE$, listener.getTransferType()),
MDC.of(LogKeys.BLOCK_ID$.MODULE$, blockId),
MDC.of(LogKeys.RETRY_COUNT$.MODULE$,retryCount));
MDC.of(LogKeys.NUM_RETRY$.MODULE$,retryCount));
} else {
logger.debug(
String.format("Failed to %s block %s, and will not retry (%s retries)",
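The log-key renames in this commit (for example RETRY_COUNT → NUM_RETRY) apply to Spark's structured-logging API, whose call sites follow the pattern below. A minimal Scala sketch, assuming it sits inside Spark's own code base (the Logging, MDC and LogKeys APIs under org.apache.spark.internal are Spark-internal); the package, class name and values are illustrative only:

```scala
package org.apache.spark.example // hypothetical; the logging APIs are internal to org.apache.spark

import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{NUM_RETRY, RETRY_WAIT_TIME}

// Illustrative class mirroring the call sites touched in this commit.
class RetryLogger extends Logging {
  def onRetry(currentRetryNum: Int, waitMs: Long): Unit = {
    // Each MDC(key, value) pair is emitted as a named field when the structured
    // (JSON) layout is enabled, and rendered inline under the plain pattern layout.
    logWarning(log"Retrying RPC (currentRetryNum=${MDC(NUM_RETRY, currentRetryNum)}, " +
      log"wait=${MDC(RETRY_WAIT_TIME, waitMs)} ms)")
  }
}
```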
4 changes: 2 additions & 2 deletions common/utils/src/main/resources/error/error-conditions.json
@@ -2675,9 +2675,9 @@
"ANALYZE TABLE(S) ... COMPUTE STATISTICS ... <ctx> must be either NOSCAN or empty."
]
},
"CREATE_FUNC_WITH_IF_NOT_EXISTS_AND_REPLACE" : {
"CREATE_ROUTINE_WITH_IF_NOT_EXISTS_AND_REPLACE" : {
"message" : [
"CREATE FUNCTION with both IF NOT EXISTS and REPLACE is not allowed."
"Cannot create a routine with both IF NOT EXISTS and REPLACE specified."
]
},
"CREATE_TEMP_FUNC_WITH_DATABASE" : {
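The reworded message above covers CREATE FUNCTION as well as the broader "routine" syntax. As a rough illustration (function name, class and jar path are placeholders), a statement that combines OR REPLACE with IF NOT EXISTS is expected to be rejected with this error condition:

```scala
import org.apache.spark.sql.{AnalysisException, SparkSession}

object CreateRoutineConflictExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("routine-conflict").getOrCreate()
    try {
      // OR REPLACE and IF NOT EXISTS are contradictory, so the statement is rejected.
      spark.sql(
        """CREATE OR REPLACE FUNCTION IF NOT EXISTS my_udf
          |AS 'com.example.MyUdf' USING JAR '/tmp/my-udf.jar'""".stripMargin)
    } catch {
      case e: AnalysisException => println(e.getMessage) // expected: the message shown above
    }
    spark.stop()
  }
}
```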
68 changes: 25 additions & 43 deletions common/utils/src/main/scala/org/apache/spark/internal/LogKey.scala

Large diffs are not rendered by default.

@@ -22,9 +22,10 @@
import org.apache.spark.internal.SparkLogger;
import org.apache.spark.internal.SparkLoggerFactory;

public class PatternLoggerSuite extends LoggerSuiteBase {
public class PatternSparkLoggerSuite extends SparkLoggerSuiteBase {

private static final SparkLogger LOGGER = SparkLoggerFactory.getLogger(PatternLoggerSuite.class);
private static final SparkLogger LOGGER =
SparkLoggerFactory.getLogger(PatternSparkLoggerSuite.class);

private String toRegexPattern(Level level, String msg) {
return msg
@@ -39,7 +39,7 @@ SparkLogger logger() {

@Override
String className() {
return PatternLoggerSuite.class.getSimpleName();
return PatternSparkLoggerSuite.class.getSimpleName();
}

@Override
@@ -30,7 +30,7 @@
import org.apache.spark.internal.LogKeys;
import org.apache.spark.internal.MDC;

public abstract class LoggerSuiteBase {
public abstract class SparkLoggerSuiteBase {

abstract SparkLogger logger();
abstract String className();
@@ -24,10 +24,10 @@
import org.apache.spark.internal.SparkLogger;
import org.apache.spark.internal.SparkLoggerFactory;

public class StructuredLoggerSuite extends LoggerSuiteBase {
public class StructuredSparkLoggerSuite extends SparkLoggerSuiteBase {

private static final SparkLogger LOGGER =
SparkLoggerFactory.getLogger(StructuredLoggerSuite.class);
SparkLoggerFactory.getLogger(StructuredSparkLoggerSuite.class);

private static final ObjectMapper JSON_MAPPER = new ObjectMapper();
private String compactAndToRegexPattern(Level level, String json) {
@@ -50,7 +50,7 @@ SparkLogger logger() {

@Override
String className() {
return StructuredLoggerSuite.class.getSimpleName();
return StructuredSparkLoggerSuite.class.getSimpleName();
}

@Override
4 changes: 2 additions & 2 deletions common/utils/src/test/resources/log4j2.properties
@@ -49,12 +49,12 @@ logger.pattern_logging.level = trace
logger.pattern_logging.appenderRefs = pattern
logger.pattern_logging.appenderRef.pattern.ref = pattern

logger.structured_logger.name = org.apache.spark.util.StructuredLoggerSuite
logger.structured_logger.name = org.apache.spark.util.StructuredSparkLoggerSuite
logger.structured_logger.level = trace
logger.structured_logger.appenderRefs = structured
logger.structured_logger.appenderRef.structured.ref = structured

logger.pattern_logger.name = org.apache.spark.util.PatternLoggerSuite
logger.pattern_logger.name = org.apache.spark.util.PatternSparkLoggerSuite
logger.pattern_logger.level = trace
logger.pattern_logger.appenderRefs = pattern
logger.pattern_logger.appenderRef.pattern.ref = pattern
@@ -1651,7 +1651,7 @@ abstract class AvroSuite
errorClass = "UNSUPPORTED_DATA_TYPE_FOR_DATASOURCE",
parameters = Map(
"columnName" -> "`testType()`",
"columnType" -> "\"INTERVAL\"",
"columnType" -> "UDT(\"INTERVAL\")",
"format" -> "Avro")
)
}
@@ -22,7 +22,7 @@ import scala.util.control.NonFatal
import io.grpc.stub.StreamObserver

import org.apache.spark.internal.Logging
import org.apache.spark.internal.LogKeys.{ERROR, POLICY, RETRY_COUNT, RETRY_WAIT_TIME}
import org.apache.spark.internal.LogKeys.{ERROR, NUM_RETRY, POLICY, RETRY_WAIT_TIME}
import org.apache.spark.internal.MDC

private[sql] class GrpcRetryHandler(
@@ -190,7 +190,7 @@ private[sql] object GrpcRetryHandler extends Logging {
// retry exception is considered immediately retriable without any policies.
logWarning(
log"Non-Fatal error during RPC execution: ${MDC(ERROR, lastException)}, " +
log"retrying (currentRetryNum=${MDC(RETRY_COUNT, currentRetryNum)})")
log"retrying (currentRetryNum=${MDC(NUM_RETRY, currentRetryNum)})")
return
}

@@ -201,7 +201,7 @@
logWarning(
log"Non-Fatal error during RPC execution: ${MDC(ERROR, lastException)}, " +
log"retrying (wait=${MDC(RETRY_WAIT_TIME, time.get.toMillis)} ms, " +
log"currentRetryNum=${MDC(RETRY_COUNT, currentRetryNum)}, " +
log"currentRetryNum=${MDC(NUM_RETRY, currentRetryNum)}, " +
log"policy=${MDC(POLICY, policy.getName)}).")
sleep(time.get.toMillis)
return
@@ -210,7 +210,7 @@

logWarning(
log"Non-Fatal error during RPC execution: ${MDC(ERROR, lastException)}, " +
log"exceeded retries (currentRetryNum=${MDC(RETRY_COUNT, currentRetryNum)})")
log"exceeded retries (currentRetryNum=${MDC(NUM_RETRY, currentRetryNum)})")

val error = new RetriesExceeded()
exceptionList.foreach(error.addSuppressed)
@@ -31,7 +31,7 @@ import org.apache.kafka.common.requests.OffsetFetchResponse

import org.apache.spark.SparkEnv
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{OFFSETS, RETRY_COUNT, TOPIC_PARTITION_OFFSET}
import org.apache.spark.internal.LogKeys.{NUM_RETRY, OFFSETS, TOPIC_PARTITION_OFFSET}
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.kafka010.KafkaSourceProvider.StrategyOnNoMatchStartingOffset
@@ -536,7 +536,7 @@ private[kafka010] class KafkaOffsetReaderAdmin(
case NonFatal(e) =>
lastException = e
logWarning(
log"Error in attempt ${MDC(RETRY_COUNT, attempt)} getting Kafka offsets: ", e)
log"Error in attempt ${MDC(NUM_RETRY, attempt)} getting Kafka offsets: ", e)
attempt += 1
Thread.sleep(offsetFetchAttemptIntervalMs)
resetAdmin()
@@ -28,7 +28,7 @@ import org.apache.kafka.common.TopicPartition

import org.apache.spark.SparkEnv
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{OFFSETS, RETRY_COUNT, TOPIC_PARTITION_OFFSET}
import org.apache.spark.internal.LogKeys.{NUM_RETRY, OFFSETS, TOPIC_PARTITION_OFFSET}
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.kafka010.KafkaSourceProvider.StrategyOnNoMatchStartingOffset
@@ -613,7 +613,7 @@ private[kafka010] class KafkaOffsetReaderConsumer(
case NonFatal(e) =>
lastException = e
logWarning(
log"Error in attempt ${MDC(RETRY_COUNT, attempt)} getting Kafka offsets: ", e)
log"Error in attempt ${MDC(NUM_RETRY, attempt)} getting Kafka offsets: ", e)
attempt += 1
Thread.sleep(offsetFetchAttemptIntervalMs)
resetConsumer()
@@ -393,9 +393,9 @@ private[kafka010] class KafkaDataConsumer(
val walTime = System.nanoTime() - startTimestampNano

logInfo(log"From Kafka ${MDC(CONSUMER, kafkaMeta)} read " +
log"${MDC(TOTAL_RECORDS_READ, totalRecordsRead)} records through " +
log"${MDC(KAFKA_PULLS_COUNT, numPolls)} polls " +
log"(polled out ${MDC(KAFKA_RECORDS_PULLED_COUNT, numRecordsPolled)} records), " +
log"${MDC(NUM_RECORDS_READ, totalRecordsRead)} records through " +
log"${MDC(NUM_KAFKA_PULLS, numPolls)} polls " +
log"(polled out ${MDC(NUM_KAFKA_RECORDS_PULLED, numRecordsPolled)} records), " +
log"taking ${MDC(TOTAL_TIME_READ, totalTimeReadNanos / NANOS_PER_MILLIS.toDouble)} ms, " +
log"during time span of ${MDC(TIME, walTime / NANOS_PER_MILLIS.toDouble)} ms."
)
@@ -30,7 +30,7 @@ import com.amazonaws.services.kinesis.model._

import org.apache.spark._
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{ERROR, RETRY_COUNT}
import org.apache.spark.internal.LogKeys.{ERROR, NUM_RETRY}
import org.apache.spark.rdd.{BlockRDD, BlockRDDPartition}
import org.apache.spark.storage.BlockId
import org.apache.spark.util.NextIterator
@@ -279,7 +279,7 @@ class KinesisSequenceRangeIterator(
t match {
case ptee: ProvisionedThroughputExceededException =>
logWarning(log"Error while ${MDC(ERROR, message)} " +
log"[attempt = ${MDC(RETRY_COUNT, retryCount + 1)}]", ptee)
log"[attempt = ${MDC(NUM_RETRY, retryCount + 1)}]", ptee)
case e: Throwable =>
throw new SparkException(s"Error while $message", e)
}
2 changes: 2 additions & 0 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -601,6 +601,8 @@ class SparkContext(config: SparkConf) extends Logging {
.foreach(logLevel => _schedulerBackend.updateExecutorsLogLevel(logLevel))
}

_conf.get(CHECKPOINT_DIR).foreach(setCheckpointDir)

val _executorMetricsSource =
if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
Some(new ExecutorMetricsSource)
@@ -159,7 +159,7 @@ private[r] class RBackendHandler(server: RBackend)
log"${MDC(CLASS_NAME, cls)}.${MDC(METHOD_NAME, methodName)}. Candidates are:")
selectedMethods.foreach { method =>
logWarning(log"${MDC(METHOD_NAME, methodName)}(" +
log"${MDC(METHOD_PARAMETER_TYPES, method.getParameterTypes.mkString(","))})")
log"${MDC(METHOD_PARAM_TYPES, method.getParameterTypes.mkString(","))})")
}
throw new Exception(s"No matched method found for $cls.$methodName")
}
@@ -181,7 +181,7 @@
+ log"Candidates are:")
ctors.foreach { ctor =>
logWarning(log"${MDC(CLASS_NAME, cls)}(" +
log"${MDC(METHOD_PARAMETER_TYPES, ctor.getParameterTypes.mkString(","))})")
log"${MDC(METHOD_PARAM_TYPES, ctor.getParameterTypes.mkString(","))})")
}
throw new Exception(s"No matched constructor found for $cls")
}
@@ -1015,7 +1015,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
}
if (count > 0) {
logWarning(log"Fail to clean up according to MAX_LOG_NUM policy " +
log"(${MDC(MAX_LOG_NUM_POLICY, maxNum)}).")
log"(${MDC(MAX_NUM_LOG_POLICY, maxNum)}).")
}
}

@@ -575,7 +575,7 @@ private[deploy] class Master(
if (!execs.exists(_.state == ExecutorState.RUNNING)) {
logError(log"Application ${MDC(LogKeys.APP_DESC, appInfo.desc.name)} " +
log"with ID ${MDC(LogKeys.APP_ID, appInfo.id)} " +
log"failed ${MDC(LogKeys.RETRY_COUNT, appInfo.retryCount)} times; removing it")
log"failed ${MDC(LogKeys.NUM_RETRY, appInfo.retryCount)} times; removing it")
removeApplication(appInfo, ApplicationState.FAILED)
}
}
@@ -1317,6 +1317,15 @@ package object config {
s" be less than or equal to ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}.")
.createWithDefault(64 * 1024 * 1024)

private[spark] val CHECKPOINT_DIR =
ConfigBuilder("spark.checkpoint.dir")
.doc(
"Set the default directory for checkpointing. It can be overwritten by " +
"SparkContext.setCheckpointDir.")
.version("4.0.0")
.stringConf
.createOptional

private[spark] val CHECKPOINT_COMPRESS =
ConfigBuilder("spark.checkpoint.compress")
.doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
16 changes: 16 additions & 0 deletions core/src/test/scala/org/apache/spark/CheckpointSuite.scala
@@ -669,4 +669,20 @@ class CheckpointStorageSuite extends SparkFunSuite with LocalSparkContext {
assert(rdd.firstParent.isInstanceOf[ReliableCheckpointRDD[_]])
}
}

test("SPARK-48268: checkpoint directory via configuration") {
withTempDir { checkpointDir =>
val conf = new SparkConf()
.set("spark.checkpoint.dir", checkpointDir.toString)
.set(UI_ENABLED.key, "false")
sc = new SparkContext("local", "test", conf)
val parCollection = sc.makeRDD(1 to 4)
val flatMappedRDD = parCollection.flatMap(x => 1 to x)
flatMappedRDD.checkpoint()
assert(flatMappedRDD.dependencies.head.rdd === parCollection)
val result = flatMappedRDD.collect()
assert(flatMappedRDD.dependencies.head.rdd != parCollection)
assert(flatMappedRDD.collect() === result)
}
}
}
9 changes: 9 additions & 0 deletions docs/configuration.md
@@ -1795,6 +1795,15 @@ Apart from these, the following properties are also available, and may be useful
</td>
<td>0.6.0</td>
</tr>
<tr>
<td><code>spark.checkpoint.dir</code></td>
<td>(none)</td>
<td>
Set the default directory for checkpointing. It can be overwritten by
SparkContext.setCheckpointDir.
</td>
<td>4.0.0</td>
</tr>
<tr>
<td><code>spark.checkpoint.compress</code></td>
<td>false</td>
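A short usage sketch for the new spark.checkpoint.dir option documented above, assuming a Spark 4.0.0 build that contains this change; the paths are placeholders. The configured directory acts as the default, and an explicit SparkContext.setCheckpointDir call still overrides it:

```scala
import org.apache.spark.{SparkConf, SparkContext}

object CheckpointDirExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("checkpoint-dir-example")
      .set("spark.checkpoint.dir", "/tmp/spark-checkpoints") // default checkpoint location
    val sc = new SparkContext(conf)

    val rdd = sc.parallelize(1 to 4).flatMap(x => 1 to x)
    rdd.checkpoint() // written under the directory configured above
    rdd.count()      // the action materializes the checkpoint

    sc.setCheckpointDir("/tmp/other-checkpoints") // later checkpoints go here instead

    sc.stop()
  }
}
```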
@@ -22,7 +22,7 @@ import scala.util.Random

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{MAX_MEMORY_SIZE, MEMORY_SIZE, NUM_CLASSES, NUM_EXAMPLES, NUM_FEATURES, NUM_NODES, TIMER, WEIGHTED_NUM}
import org.apache.spark.internal.LogKeys.{MAX_MEMORY_SIZE, MEMORY_SIZE, NUM_CLASSES, NUM_EXAMPLES, NUM_FEATURES, NUM_NODES, NUM_WEIGHTED_EXAMPLES, TIMER}
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.impl.Utils
@@ -136,7 +136,7 @@ private[spark] object RandomForest extends Logging with Serializable {
logInfo(log"numClasses: ${MDC(NUM_CLASSES, metadata.numClasses)}")
logInfo(log"numExamples: ${MDC(NUM_EXAMPLES, metadata.numExamples)}")
logInfo(log"weightedNumExamples: " +
log"${MDC(WEIGHTED_NUM, metadata.weightedNumExamples)}")
log"${MDC(NUM_WEIGHTED_EXAMPLES, metadata.weightedNumExamples)}")
}

timer.start("init")
@@ -28,7 +28,7 @@ import org.json4s.DefaultFormats

import org.apache.spark.annotation.Since
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{CROSS_VALIDATION_METRIC, CROSS_VALIDATION_METRICS, ESTIMATOR_PARAMETER_MAP}
import org.apache.spark.internal.LogKeys.{CROSS_VALIDATION_METRIC, CROSS_VALIDATION_METRICS, ESTIMATOR_PARAM_MAP}
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.evaluation.Evaluator
import org.apache.spark.ml.param.{IntParam, Param, ParamMap, ParamValidators}
@@ -198,7 +198,7 @@ class CrossValidator @Since("1.2.0") (@Since("1.4.0") override val uid: String)
val (bestMetric, bestIndex) =
if (eval.isLargerBetter) metrics.zipWithIndex.maxBy(_._1)
else metrics.zipWithIndex.minBy(_._1)
instr.logInfo(log"Best set of parameters:\n${MDC(ESTIMATOR_PARAMETER_MAP, epm(bestIndex))}")
instr.logInfo(log"Best set of parameters:\n${MDC(ESTIMATOR_PARAM_MAP, epm(bestIndex))}")
instr.logInfo(log"Best cross-validation metric: ${MDC(CROSS_VALIDATION_METRIC, bestMetric)}.")
val bestModel = est.fit(dataset, epm(bestIndex)).asInstanceOf[Model[_]]
copyValues(new CrossValidatorModel(uid, bestModel, metrics)
@@ -29,7 +29,7 @@ import org.json4s.DefaultFormats

import org.apache.spark.annotation.Since
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{ESTIMATOR_PARAMETER_MAP, TRAIN_VALIDATION_SPLIT_METRIC, TRAIN_VALIDATION_SPLIT_METRICS}
import org.apache.spark.internal.LogKeys.{ESTIMATOR_PARAM_MAP, TRAIN_VALIDATION_SPLIT_METRIC, TRAIN_VALIDATION_SPLIT_METRICS}
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.evaluation.Evaluator
import org.apache.spark.ml.param.{DoubleParam, ParamMap, ParamValidators}
@@ -174,7 +174,7 @@ class TrainValidationSplit @Since("1.5.0") (@Since("1.5.0") override val uid: St
val (bestMetric, bestIndex) =
if (eval.isLargerBetter) metrics.zipWithIndex.maxBy(_._1)
else metrics.zipWithIndex.minBy(_._1)
instr.logInfo(log"Best set of parameters:\n${MDC(ESTIMATOR_PARAMETER_MAP, epm(bestIndex))}")
instr.logInfo(log"Best set of parameters:\n${MDC(ESTIMATOR_PARAM_MAP, epm(bestIndex))}")
instr.logInfo(log"Best train validation split metric: " +
log"${MDC(TRAIN_VALIDATION_SPLIT_METRIC, bestMetric)}.")
val bestModel = est.fit(dataset, epm(bestIndex)).asInstanceOf[Model[_]]
@@ -32,7 +32,7 @@ import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.{Logging, MDC}
import org.apache.spark.internal.LogKeys.{ALPHA, COUNT, TRAIN_WORD_COUNT, VOCAB_SIZE}
import org.apache.spark.internal.LogKeys.{ALPHA, COUNT, NUM_TRAIN_WORD, VOCAB_SIZE}
import org.apache.spark.internal.config.Kryo.KRYO_SERIALIZER_MAX_BUFFER_SIZE
import org.apache.spark.ml.linalg.BLAS
import org.apache.spark.mllib.linalg.{Vector, Vectors}
@@ -210,7 +210,7 @@ class Word2Vec extends Serializable with Logging {
a += 1
}
logInfo(log"vocabSize = ${MDC(VOCAB_SIZE, vocabSize)}," +
log" trainWordsCount = ${MDC(TRAIN_WORD_COUNT, trainWordsCount)}")
log" trainWordsCount = ${MDC(NUM_TRAIN_WORD, trainWordsCount)}")
}

private def createExpTable(): Array[Float] = {