diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala index 50c5900..870a7f8 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala @@ -66,8 +66,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param stateTimeout default state timeout for this state * @param stateFunctionBuilder partial function builder describing response to input */ - final def when(stateName: S, - stateTimeout: FiniteDuration, + final def when(stateName: S, + stateTimeout: FiniteDuration, stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = when(stateName, stateTimeout)(stateFunctionBuilder.build()) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index c03278a..865ab55 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -96,7 +96,7 @@ final case class ActorIdentity(correlationId: Any, ref: Option[ActorRef]) { @SerialVersionUID(1L) final case class Terminated private[akka] (@BeanProperty actor: ActorRef)( @BeanProperty val existenceConfirmed: Boolean, - @BeanProperty val addressTerminated: Boolean) + @BeanProperty val addressTerminated: Boolean) extends AutoReceivedMessage with PossiblyHarmful with DeadLetterSuppression /** diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 4165f38..1fc24a7 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -372,11 +372,11 @@ private[akka] object ActorCell { * for! (waves hand) */ private[akka] class ActorCell( - val system: ActorSystemImpl, - val self: InternalActorRef, + val system: ActorSystemImpl, + val self: InternalActorRef, final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields - val dispatcher: MessageDispatcher, - val parent: InternalActorRef) + val dispatcher: MessageDispatcher, + val parent: InternalActorRef) extends UntypedActorContext with AbstractActorContext with Cell with dungeon.ReceiveTimeout with dungeon.Children diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 0e2cc5b..39bd193 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -302,11 +302,11 @@ private[akka] case object Nobody extends MinimalActorRef { * INTERNAL API */ private[akka] class LocalActorRef private[akka] ( - _system: ActorSystemImpl, - _props: Props, - _dispatcher: MessageDispatcher, - _mailboxType: MailboxType, - _supervisor: InternalActorRef, + _system: ActorSystemImpl, + _props: Props, + _dispatcher: MessageDispatcher, + _mailboxType: MailboxType, + _supervisor: InternalActorRef, override val path: ActorPath) extends ActorRefWithCell with LocalRef { @@ -519,8 +519,8 @@ private[akka] object DeadLetterActorRef { * INTERNAL API */ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, - override val path: ActorPath, - val eventStream: EventStream) extends MinimalActorRef { + override val path: ActorPath, + val eventStream: EventStream) extends MinimalActorRef { @deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2") override private[akka] def isTerminated = true @@ -570,8 +570,8 @@ private[akka] class 
EmptyLocalActorRef(override val provider: ActorRefProvider, * * INTERNAL API */ -private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, - _path: ActorPath, +private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, + _path: ActorPath, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { @@ -601,10 +601,10 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, * INTERNAL API */ private[akka] class VirtualPathContainer( - override val provider: ActorRefProvider, - override val path: ActorPath, + override val provider: ActorRefProvider, + override val path: ActorPath, override val getParent: InternalActorRef, - val log: LoggingAdapter) extends MinimalActorRef { + val log: LoggingAdapter) extends MinimalActorRef { private val children = new ConcurrentHashMap[String, InternalActorRef] @@ -705,10 +705,10 @@ private[akka] class VirtualPathContainer( * When using the watch() feature you must ensure that upon reception of the * Terminated message the watched actorRef is unwatch()ed. */ -private[akka] final class FunctionRef(override val path: ActorPath, +private[akka] final class FunctionRef(override val path: ActorPath, override val provider: ActorRefProvider, - val eventStream: EventStream, - f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef { + val eventStream: EventStream, + f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef { override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { f(sender, message) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 2dc6aed..e4636fc 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -105,14 +105,14 @@ trait ActorRefProvider { * the latter can be suppressed by setting ``lookupDeploy`` to ``false``. */ def actorOf( - system: ActorSystemImpl, - props: Props, - supervisor: InternalActorRef, - path: ActorPath, + system: ActorSystemImpl, + props: Props, + supervisor: InternalActorRef, + path: ActorPath, systemService: Boolean, - deploy: Option[Deploy], - lookupDeploy: Boolean, - async: Boolean): InternalActorRef + deploy: Option[Deploy], + lookupDeploy: Boolean, + async: Boolean): InternalActorRef /** * INTERNAL API @@ -475,18 +475,18 @@ private[akka] object LocalActorRefProvider { * Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported. 
*/ private[akka] class LocalActorRefProvider private[akka] ( - _systemName: String, + _systemName: String, override val settings: ActorSystem.Settings, - val eventStream: EventStream, - val dynamicAccess: DynamicAccess, + val eventStream: EventStream, + val dynamicAccess: DynamicAccess, override val deployer: Deployer, - _deadLetters: Option[ActorPath ⇒ InternalActorRef]) + _deadLetters: Option[ActorPath ⇒ InternalActorRef]) extends ActorRefProvider { // this is the constructor needed for reflectively instantiating the provider - def this(_systemName: String, - settings: ActorSystem.Settings, - eventStream: EventStream, + def this(_systemName: String, + settings: ActorSystem.Settings, + eventStream: EventStream, dynamicAccess: DynamicAccess) = this(_systemName, settings, diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index 278bcf0..81fe9a2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -214,7 +214,7 @@ object ActorSelection { matchingChildren.foreach(_.tell(sel.msg, sender)) } else { val matchingChildren = chldr.filter(c ⇒ p.pattern.matcher(c.path.name).matches) - // don't send to emptyRef after wildcard fan-out + // don't send to emptyRef after wildcard fan-out if (matchingChildren.isEmpty && !sel.wildcardFanOut) emptyRef.tell(sel, sender) else { @@ -253,8 +253,8 @@ trait ScalaActorSelection { */ @SerialVersionUID(2L) // it has protobuf serialization in akka-remote private[akka] final case class ActorSelectionMessage( - msg: Any, - elements: immutable.Iterable[SelectionPathElement], + msg: Any, + elements: immutable.Iterable[SelectionPathElement], wildcardFanOut: Boolean) extends AutoReceivedMessage with PossiblyHarmful { diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 827fc44..1638c25 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -522,11 +522,11 @@ abstract class ExtendedActorSystem extends ActorSystem { } private[akka] class ActorSystemImpl( - val name: String, - applicationConfig: Config, - classLoader: ClassLoader, + val name: String, + applicationConfig: Config, + classLoader: ClassLoader, defaultExecutionContext: Option[ExecutionContext], - val guardianProps: Option[Props]) extends ExtendedActorSystem { + val guardianProps: Option[Props]) extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-_]*$""")) throw new IllegalArgumentException( @@ -610,7 +610,7 @@ private[akka] class ActorSystemImpl( eventStream.startStdoutLogger(settings) val logFilter: LoggingFilter = { - val arguments = Vector(classOf[Settings] -> settings, classOf[EventStream] -> eventStream) + val arguments = Vector(classOf[Settings] → settings, classOf[EventStream] → eventStream) dynamicAccess.createInstanceFor[LoggingFilter](LoggingFilter, arguments).get } @@ -620,10 +620,10 @@ private[akka] class ActorSystemImpl( val provider: ActorRefProvider = try { val arguments = Vector( - classOf[String] -> name, - classOf[Settings] -> settings, - classOf[EventStream] -> eventStream, - classOf[DynamicAccess] -> dynamicAccess) + classOf[String] → name, + classOf[Settings] → settings, + classOf[EventStream] → eventStream, + classOf[DynamicAccess] → dynamicAccess) dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get } catch { @@ -715,9 +715,9 @@ 
private[akka] class ActorSystemImpl( */ protected def createScheduler(): Scheduler = dynamicAccess.createInstanceFor[Scheduler](settings.SchedulerClass, immutable.Seq( - classOf[Config] -> settings.config, - classOf[LoggingAdapter] -> log, - classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))).get + classOf[Config] → settings.config, + classOf[LoggingAdapter] → log, + classOf[ThreadFactory] → threadFactory.withName(threadFactory.name + "-scheduler"))).get //#create-scheduler /* diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 7f71bb0..9431139 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -35,12 +35,12 @@ object Deploy { */ @SerialVersionUID(2L) final case class Deploy( - path: String = "", - config: Config = ConfigFactory.empty, + path: String = "", + config: Config = ConfigFactory.empty, routerConfig: RouterConfig = NoRouter, - scope: Scope = NoScopeGiven, - dispatcher: String = Deploy.NoDispatcherGiven, - mailbox: String = Deploy.NoMailboxGiven) { + scope: Scope = NoScopeGiven, + dispatcher: String = Deploy.NoDispatcherGiven, + mailbox: String = Deploy.NoMailboxGiven) { /** * Java API to create a Deploy with the given RouterConfig @@ -137,7 +137,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce protected val default = config.getConfig("default") val routerTypeMapping: Map[String, String] = settings.config.getConfig("akka.actor.router.type-mapping").root.unwrapped.asScala.collect { - case (key, value: String) ⇒ (key -> value) + case (key, value: String) ⇒ (key → value) }.toMap config.root.asScala flatMap { @@ -198,8 +198,8 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce s"[${args(0)._1.getName}] and optional [${args(1)._1.getName}] parameter", cause) // first try with Config param, and then with Config and DynamicAccess parameters - val args1 = List(classOf[Config] -> deployment2) - val args2 = List(classOf[Config] -> deployment2, classOf[DynamicAccess] -> dynamicAccess) + val args1 = List(classOf[Config] → deployment2) + val args2 = List(classOf[Config] → deployment2, classOf[DynamicAccess] → dynamicAccess) dynamicAccess.createInstanceFor[RouterConfig](fqn, args1).recover({ case e @ (_: IllegalArgumentException | _: ConfigException) ⇒ throw e case e: NoSuchMethodException ⇒ diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 175e839..077785c 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -150,5 +150,5 @@ abstract class ExtensionKey[T <: Extension](implicit m: ClassTag[T]) extends Ext def this(clazz: Class[T]) = this()(ClassTag(clazz)) override def lookup(): ExtensionId[T] = this - def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] -> system)).get + def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] → system)).get } diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index e2e38d7..186888d 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ 
b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -380,9 +380,9 @@ abstract class SupervisorStrategy { * @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled */ case class AllForOneStrategy( - maxNrOfRetries: Int = -1, - withinTimeRange: Duration = Duration.Inf, - override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) + maxNrOfRetries: Int = -1, + withinTimeRange: Duration = Duration.Inf, + override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) extends SupervisorStrategy { import SupervisorStrategy._ @@ -458,9 +458,9 @@ case class AllForOneStrategy( * @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled */ case class OneForOneStrategy( - maxNrOfRetries: Int = -1, - withinTimeRange: Duration = Duration.Inf, - override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) + maxNrOfRetries: Int = -1, + withinTimeRange: Duration = Duration.Inf, + override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) extends SupervisorStrategy { /** diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala index aa723ad..17649d6 100644 --- a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala @@ -34,8 +34,8 @@ import akka.dispatch.AbstractNodeQueue * scheduled possibly one tick later than they could be (if checking that * “now() + delay <= nextTick” were done). */ -class LightArrayRevolverScheduler(config: Config, - log: LoggingAdapter, +class LightArrayRevolverScheduler(config: Config, + log: LoggingAdapter, threadFactory: ThreadFactory) extends Scheduler with Closeable { @@ -84,8 +84,8 @@ class LightArrayRevolverScheduler(config: Config, } override def schedule(initialDelay: FiniteDuration, - delay: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = { + delay: FiniteDuration, + runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = { checkMaxDelay(roundUp(delay).toNanos) val preparedEC = executor.prepare() try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self ⇒ @@ -215,7 +215,7 @@ class LightArrayRevolverScheduler(config: Config, time - start + // calculate the nanos since timer start (ticks * tickNanos) + // adding the desired delay tickNanos - 1 // rounding up - ) / tickNanos).toInt // and converting to slot number + ) / tickNanos).toInt // and converting to slot number // tick is an Int that will wrap around, but toInt of futureTick gives us modulo operations // and the difference (offset) will be correct in any case val offset = futureTick - tick diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index 311a1ab..987b40d 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -25,12 +25,12 @@ import akka.dispatch.sysmsg._ * and swap out the cell ref. 
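
For reference, the OneForOneStrategy/AllForOneStrategy parameters realigned above are normally supplied together with a decider in the second parameter list; a minimal sketch, assuming an illustrative Supervisor actor and exception cases (not part of this patch):

import akka.actor.{ Actor, OneForOneStrategy, Props }
import akka.actor.SupervisorStrategy._
import scala.concurrent.duration._

class Supervisor extends Actor {
  // OneForOneStrategy applies the directive to the failing child only;
  // AllForOneStrategy would apply it to all children
  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
      case _: ArithmeticException      ⇒ Resume
      case _: IllegalArgumentException ⇒ Stop
      case _: Exception                ⇒ Restart
    }

  def receive = {
    case p: Props ⇒ sender() ! context.actorOf(p)
  }
}
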
*/ private[akka] class RepointableActorRef( - val system: ActorSystemImpl, - val props: Props, - val dispatcher: MessageDispatcher, + val system: ActorSystemImpl, + val props: Props, + val dispatcher: MessageDispatcher, val mailboxType: MailboxType, - val supervisor: InternalActorRef, - val path: ActorPath) + val supervisor: InternalActorRef, + val path: ActorPath) extends ActorRefWithCell with RepointableRef { import AbstractActorRef.{ cellOffset, lookupOffset } @@ -172,8 +172,8 @@ private[akka] class RepointableActorRef( } private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, - val self: RepointableActorRef, - val props: Props, + val self: RepointableActorRef, + val props: Props, val supervisor: InternalActorRef) extends Cell { /* diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 9bca96a..94f3b9b 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -42,10 +42,10 @@ trait Scheduler { */ final def schedule( initialDelay: FiniteDuration, - interval: FiniteDuration, - receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = + interval: FiniteDuration, + receiver: ActorRef, + message: Any)(implicit executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = schedule(initialDelay, interval, new Runnable { def run = { receiver ! message @@ -71,8 +71,9 @@ trait Scheduler { */ final def schedule( initialDelay: FiniteDuration, - interval: FiniteDuration)(f: ⇒ Unit)( - implicit executor: ExecutionContext): Cancellable = + interval: FiniteDuration)(f: ⇒ Unit)( + implicit + executor: ExecutionContext): Cancellable = schedule(initialDelay, interval, new Runnable { override def run = f }) /** @@ -93,8 +94,8 @@ trait Scheduler { */ def schedule( initialDelay: FiniteDuration, - interval: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable + interval: FiniteDuration, + runnable: Runnable)(implicit executor: ExecutionContext): Cancellable /** * Schedules a message to be sent once with a delay, i.e. a time period that has @@ -103,10 +104,10 @@ trait Scheduler { * Java & Scala API */ final def scheduleOnce( - delay: FiniteDuration, + delay: FiniteDuration, receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = + message: Any)(implicit executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = scheduleOnce(delay, new Runnable { override def run = receiver ! 
message }) @@ -118,7 +119,8 @@ trait Scheduler { * Scala API */ final def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)( - implicit executor: ExecutionContext): Cancellable = + implicit + executor: ExecutionContext): Cancellable = scheduleOnce(delay, new Runnable { override def run = f }) /** @@ -128,7 +130,7 @@ trait Scheduler { * Java & Scala API */ def scheduleOnce( - delay: FiniteDuration, + delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable /** diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 9ec9b2e..0ef519a 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -523,11 +523,11 @@ object TypedProps { @SerialVersionUID(1L) final case class TypedProps[T <: AnyRef] protected[TypedProps] ( interfaces: immutable.Seq[Class[_]], - creator: () ⇒ T, - dispatcher: String = TypedProps.defaultDispatcherId, - deploy: Deploy = Props.defaultDeploy, - timeout: Option[Timeout] = TypedProps.defaultTimeout, - loader: Option[ClassLoader] = TypedProps.defaultLoader) { + creator: () ⇒ T, + dispatcher: String = TypedProps.defaultDispatcherId, + deploy: Deploy = Props.defaultDeploy, + timeout: Option[Timeout] = TypedProps.defaultTimeout, + loader: Option[ClassLoader] = TypedProps.defaultLoader) { /** * Uses the supplied class as the factory for the TypedActor implementation, diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 52ada0f..42fdaab 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -322,8 +322,8 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) case fqcn ⇒ val args = List( - classOf[Config] -> config, - classOf[DispatcherPrerequisites] -> prerequisites) + classOf[Config] → config, + classOf[DispatcherPrerequisites] → prerequisites) prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({ case exception ⇒ throw new IllegalArgumentException( ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], @@ -377,13 +377,13 @@ object ForkJoinExecutorConfigurator { /** * INTERNAL AKKA USAGE ONLY */ - final class AkkaForkJoinPool(parallelism: Int, - threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + final class AkkaForkJoinPool(parallelism: Int, + threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, unhandledExceptionHandler: Thread.UncaughtExceptionHandler, - asyncMode: Boolean) + asyncMode: Boolean) extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode) with LoadMetrics { - def this(parallelism: Int, - threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + def this(parallelism: Int, + threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true) override def execute(r: Runnable): Unit = @@ -426,8 +426,8 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer } class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, - val parallelism: Int, - val 
asyncMode: Boolean) extends ExecutorServiceFactory { + val parallelism: Int, + val asyncMode: Boolean) extends ExecutorServiceFactory { def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) = this(threadFactory, parallelism, asyncMode = true) def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, asyncMode) } diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index d79eed9..6568df3 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -30,14 +30,14 @@ import scala.concurrent.duration.FiniteDuration */ @deprecated("Use BalancingPool instead of BalancingDispatcher", "2.3") class BalancingDispatcher( - _configurator: MessageDispatcherConfigurator, - _id: String, - throughput: Int, - throughputDeadlineTime: Duration, - _mailboxType: MailboxType, + _configurator: MessageDispatcherConfigurator, + _id: String, + throughput: Int, + throughputDeadlineTime: Duration, + _mailboxType: MailboxType, _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, - _shutdownTimeout: FiniteDuration, - attemptTeamWork: Boolean) + _shutdownTimeout: FiniteDuration, + attemptTeamWork: Boolean) extends Dispatcher(_configurator, _id, throughput, throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) { /** diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index e5b9784..add93f9 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -25,12 +25,12 @@ import scala.concurrent.duration.FiniteDuration * Larger values (or zero or negative) increase throughput, smaller values increase fairness */ class Dispatcher( - _configurator: MessageDispatcherConfigurator, - val id: String, - val throughput: Int, - val throughputDeadlineTime: Duration, + _configurator: MessageDispatcherConfigurator, + val id: String, + val throughput: Int, + val throughputDeadlineTime: Duration, executorServiceFactoryProvider: ExecutorServiceFactoryProvider, - val shutdownTimeout: FiniteDuration) + val shutdownTimeout: FiniteDuration) extends MessageDispatcher(_configurator) { import configurator.prerequisites._ diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 78ffa96..ae89e35 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -30,12 +30,12 @@ trait DispatcherPrerequisites { * INTERNAL API */ private[akka] final case class DefaultDispatcherPrerequisites( - val threadFactory: ThreadFactory, - val eventStream: EventStream, - val scheduler: Scheduler, - val dynamicAccess: DynamicAccess, - val settings: ActorSystem.Settings, - val mailboxes: Mailboxes, + val threadFactory: ThreadFactory, + val eventStream: EventStream, + val scheduler: Scheduler, + val dynamicAccess: DynamicAccess, + val settings: ActorSystem.Settings, + val mailboxes: Mailboxes, val defaultExecutionContext: Option[ExecutionContext]) extends DispatcherPrerequisites object Dispatchers { @@ -135,13 +135,13 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc def simpleName = id.substring(id.lastIndexOf('.') + 1) 
idConfig(id) .withFallback(appConfig) - .withFallback(ConfigFactory.parseMap(Map("name" -> simpleName).asJava)) + .withFallback(ConfigFactory.parseMap(Map("name" → simpleName).asJava)) .withFallback(defaultDispatcherConfig) } private def idConfig(id: String): Config = { import scala.collection.JavaConverters._ - ConfigFactory.parseMap(Map("id" -> id).asJava) + ConfigFactory.parseMap(Map("id" → id).asJava) } /** @@ -180,7 +180,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc classOf[BalancingDispatcherConfigurator].getName) case "PinnedDispatcher" ⇒ new PinnedDispatcherConfigurator(cfg, prerequisites) case fqn ⇒ - val args = List(classOf[Config] -> cfg, classOf[DispatcherPrerequisites] -> prerequisites) + val args = List(classOf[Config] → cfg, classOf[DispatcherPrerequisites] → prerequisites) prerequisites.dynamicAccess.createInstanceFor[MessageDispatcherConfigurator](fqn, args).recover({ case exception ⇒ throw new ConfigurationException( diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index d25c825..9c9e9d2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -9,7 +9,7 @@ import akka.japi.{ Function ⇒ JFunc, Option ⇒ JOption, Procedure } import scala.concurrent.{ Future, Promise, ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService } import java.lang.{ Iterable ⇒ JIterable } import java.util.{ LinkedList ⇒ JLinkedList } -import java.util.concurrent.{ Executor, ExecutorService, Callable} +import java.util.concurrent.{ Executor, ExecutorService, Callable } import scala.util.{ Try, Success, Failure } import java.util.concurrent.CompletionStage import java.util.concurrent.CompletableFuture diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 1d34f80..00260cb 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -248,7 +248,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) * Process the messages in the mailbox */ @tailrec private final def processMailbox( - left: Int = java.lang.Math.max(dispatcher.throughput, 1), + left: Int = java.lang.Math.max(dispatcher.throughput, 1), deadlineNs: Long = if (dispatcher.isThroughputDeadlineTimeDefined == true) System.nanoTime + dispatcher.throughputDeadlineTime.toNanos else 0L): Unit = if (shouldProcessMessage) { val next = dequeue() diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala index 1fb40b6..9529c7c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala @@ -23,10 +23,10 @@ object Mailboxes { } private[akka] class Mailboxes( - val settings: ActorSystem.Settings, + val settings: ActorSystem.Settings, val eventStream: EventStream, - dynamicAccess: DynamicAccess, - deadLetters: ActorRef) { + dynamicAccess: DynamicAccess, + deadLetters: ActorRef) { import Mailboxes._ @@ -187,7 +187,7 @@ private[akka] class Mailboxes( val mailboxType = conf.getString("mailbox-type") match { case "" ⇒ throw new ConfigurationException(s"The setting mailbox-type, defined in [$id] is empty") case fqcn ⇒ - val args = List(classOf[ActorSystem.Settings] -> settings, classOf[Config] -> conf) + val args = List(classOf[ActorSystem.Settings] → settings, 
classOf[Config] → conf) dynamicAccess.createInstanceFor[MailboxType](fqcn, args).recover({ case exception ⇒ throw new IllegalArgumentException( @@ -228,7 +228,7 @@ private[akka] class Mailboxes( //INTERNAL API private def config(id: String): Config = { import scala.collection.JavaConverters._ - ConfigFactory.parseMap(Map("id" -> id).asJava) + ConfigFactory.parseMap(Map("id" → id).asJava) .withFallback(settings.config.getConfig(id)) .withFallback(defaultMailboxConfig) } diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index 3eba061..21918bb 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -15,10 +15,10 @@ import scala.concurrent.duration.FiniteDuration * the `lookup` method in [[akka.dispatch.Dispatchers]]. */ class PinnedDispatcher( - _configurator: MessageDispatcherConfigurator, - _actor: ActorCell, - _id: String, - _shutdownTimeout: FiniteDuration, + _configurator: MessageDispatcherConfigurator, + _actor: ActorCell, + _id: String, + _shutdownTimeout: FiniteDuration, _threadPoolConfig: ThreadPoolConfig) extends Dispatcher(_configurator, _id, diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index ce88a43..88d0f35 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -65,12 +65,12 @@ trait ExecutorServiceFactoryProvider { /** * A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher */ -final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout, - corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize, - maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize, - threadTimeout: Duration = ThreadPoolConfig.defaultTimeout, - queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(), - rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy) +final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout, + corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize, + maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize, + threadTimeout: Duration = ThreadPoolConfig.defaultTimeout, + queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(), + rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy) extends ExecutorServiceFactoryProvider { class ThreadPoolExecutorServiceFactory(val threadFactory: ThreadFactory) extends ExecutorServiceFactory { def createExecutorService: ExecutorService = { @@ -173,11 +173,11 @@ object MonitorableThreadFactory { } } -final case class MonitorableThreadFactory(name: String, - daemonic: Boolean, - contextClassLoader: Option[ClassLoader], - exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing, - protected val counter: AtomicLong = new AtomicLong) +final case class MonitorableThreadFactory(name: String, + daemonic: Boolean, + contextClassLoader: Option[ClassLoader], + exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing, + protected val counter: AtomicLong = new AtomicLong) extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { def 
newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala index 5dc3091..9c0da4a 100644 --- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala +++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala @@ -261,6 +261,6 @@ private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: In @SerialVersionUID(1L) private[akka] final case class DeathWatchNotification( - actor: ActorRef, + actor: ActorRef, existenceConfirmed: Boolean, - addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression + addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala index 6ee9aab..9a342d5 100644 --- a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala +++ b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala @@ -58,7 +58,7 @@ object SimpleDnsCache { new Cache( queue + new ExpiryEntry(answer.name, until), - cache + (answer.name -> CacheEntry(answer, until)), + cache + (answer.name → CacheEntry(answer, until)), clock) } diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala index d8f42f7..3808643 100644 --- a/akka-actor/src/main/scala/akka/io/Tcp.scala +++ b/akka-actor/src/main/scala/akka/io/Tcp.scala @@ -111,10 +111,10 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * @param options Please refer to the `Tcp.SO` object for a list of all supported options. */ final case class Connect(remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - timeout: Option[FiniteDuration] = None, - pullMode: Boolean = false) extends Command + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + timeout: Option[FiniteDuration] = None, + pullMode: Boolean = false) extends Command /** * The Bind message is sent to the TCP manager actor, which is obtained via @@ -135,11 +135,11 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * * @param options Please refer to the `Tcp.SO` object for a list of all supported options. 
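
A minimal usage sketch for the Connect command reformatted above (the Client class and address handling are illustrative, not part of this patch): an outgoing connection is requested via the TCP manager, and the connection actor answers with Connected or CommandFailed.

import java.net.InetSocketAddress
import akka.actor.Actor
import akka.io.{ IO, Tcp }

class Client(remote: InetSocketAddress) extends Actor {
  import Tcp._
  import context.system

  // defaults from the case class above: no local address, no options,
  // no connect timeout, push-mode reading
  IO(Tcp) ! Connect(remote)

  def receive = {
    case CommandFailed(_: Connect) ⇒ context.stop(self)
    case Connected(remoteAddr, localAddr) ⇒
      sender() ! Register(self) // sender() is the connection actor
  }
}
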
*/ - final case class Bind(handler: ActorRef, + final case class Bind(handler: ActorRef, localAddress: InetSocketAddress, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - pullMode: Boolean = false) extends Command + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + pullMode: Boolean = false) extends Command /** * This message must be sent to a TCP connection actor after receiving the @@ -625,10 +625,10 @@ object TcpMessage { * @param pullMode enables pull based reading from the connection */ def connect(remoteAddress: InetSocketAddress, - localAddress: InetSocketAddress, - options: JIterable[SocketOption], - timeout: FiniteDuration, - pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode) + localAddress: InetSocketAddress, + options: JIterable[SocketOption], + timeout: FiniteDuration, + pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode) /** * Connect to the given `remoteAddress` without binding to a local address and without @@ -658,17 +658,17 @@ object TcpMessage { * @param pullMode enables pull based accepting of connections and pull * based reading from the accepted connections. */ - def bind(handler: ActorRef, + def bind(handler: ActorRef, endpoint: InetSocketAddress, - backlog: Int, - options: JIterable[SocketOption], + backlog: Int, + options: JIterable[SocketOption], pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode) /** * Open a listening socket without specifying options. */ - def bind(handler: ActorRef, + def bind(handler: ActorRef, endpoint: InetSocketAddress, - backlog: Int): Command = Bind(handler, endpoint, backlog, Nil) + backlog: Int): Command = Bind(handler, endpoint, backlog, Nil) /** * This message must be sent to a TCP connection actor after receiving the diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala index 6cd9d60..71b4938 100644 --- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala @@ -388,9 +388,9 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha class PendingBufferWrite( val commander: ActorRef, remainingData: ByteString, - ack: Any, - buffer: ByteBuffer, - tail: WriteCommand) extends PendingWrite { + ack: Any, + buffer: ByteBuffer, + tail: WriteCommand) extends PendingWrite { def doWrite(info: ConnectionInfo): PendingWrite = { @tailrec def writeToChannel(data: ByteString): PendingWrite = { @@ -429,11 +429,11 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha class PendingWriteFile( val commander: ActorRef, - fileChannel: FileChannel, - offset: Long, - remaining: Long, - ack: Event, - tail: WriteCommand) extends PendingWrite with Runnable { + fileChannel: FileChannel, + offset: Long, + remaining: Long, + ack: Event, + tail: WriteCommand) extends PendingWrite with Runnable { def doWrite(info: ConnectionInfo): PendingWrite = { tcp.fileIoDispatcher.execute(this) @@ -479,10 +479,10 @@ private[io] object TcpConnection { /** * Groups required connection-related data that are only available once the connection has been fully established. 
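
The Bind message and the TcpMessage.bind helpers above are driven the same way from a server-side actor; a sketch assuming an illustrative SimplisticHandler for accepted connections:

import java.net.InetSocketAddress
import akka.actor.{ Actor, Props }
import akka.io.{ IO, Tcp }

class Server extends Actor {
  import Tcp._
  import context.system

  // backlog, options and pullMode fall back to the defaults shown above
  IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0))

  def receive = {
    case b @ Bound(localAddress) ⇒ context.parent ! b
    case CommandFailed(_: Bind)  ⇒ context.stop(self)
    case Connected(remote, local) ⇒
      val handler = context.actorOf(Props[SimplisticHandler])
      sender() ! Register(handler)
  }
}

class SimplisticHandler extends Actor {
  import Tcp._
  def receive = {
    case Received(data) ⇒ sender() ! Write(data) // echo the bytes back
    case PeerClosed     ⇒ context.stop(self)
  }
}
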
*/ - final case class ConnectionInfo(registration: ChannelRegistration, - handler: ActorRef, + final case class ConnectionInfo(registration: ChannelRegistration, + handler: ActorRef, keepOpenOnPeerClosed: Boolean, - useResumeWriting: Boolean) + useResumeWriting: Boolean) // INTERNAL MESSAGES diff --git a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala index 689a5b8..b6de00b 100644 --- a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala @@ -15,11 +15,11 @@ import akka.io.Inet.SocketOption * * INTERNAL API */ -private[io] class TcpIncomingConnection(_tcp: TcpExt, - _channel: SocketChannel, - registry: ChannelRegistry, - bindHandler: ActorRef, - options: immutable.Traversable[SocketOption], +private[io] class TcpIncomingConnection(_tcp: TcpExt, + _channel: SocketChannel, + registry: ChannelRegistry, + bindHandler: ActorRef, + options: immutable.Traversable[SocketOption], readThrottling: Boolean) extends TcpConnection(_tcp, _channel, readThrottling) { diff --git a/akka-actor/src/main/scala/akka/io/TcpListener.scala b/akka-actor/src/main/scala/akka/io/TcpListener.scala index 7e5771a..7f34b42 100644 --- a/akka-actor/src/main/scala/akka/io/TcpListener.scala +++ b/akka-actor/src/main/scala/akka/io/TcpListener.scala @@ -29,11 +29,11 @@ private[io] object TcpListener { /** * INTERNAL API */ -private[io] class TcpListener(selectorRouter: ActorRef, - tcp: TcpExt, +private[io] class TcpListener(selectorRouter: ActorRef, + tcp: TcpExt, channelRegistry: ChannelRegistry, - bindCommander: ActorRef, - bind: Bind) + bindCommander: ActorRef, + bind: Bind) extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import TcpListener._ diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala index e6fce9c..56e221d 100644 --- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala @@ -19,10 +19,10 @@ import akka.io.Tcp._ * * INTERNAL API */ -private[io] class TcpOutgoingConnection(_tcp: TcpExt, +private[io] class TcpOutgoingConnection(_tcp: TcpExt, channelRegistry: ChannelRegistry, - commander: ActorRef, - connect: Connect) + commander: ActorRef, + connect: Connect) extends TcpConnection(_tcp, SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel], connect.pullMode) { import context._ diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala index 035cd31..543c21c 100644 --- a/akka-actor/src/main/scala/akka/io/Udp.scala +++ b/akka-actor/src/main/scala/akka/io/Udp.scala @@ -92,9 +92,9 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { * The listener actor for the newly bound port will reply with a [[Bound]] * message, or the manager will reply with a [[CommandFailed]] message. 
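
For the UDP Bind command above, a minimal listener sketch (Listener and nextActor are illustrative); the actor that answers with Bound is the socket actor that subsequently delivers Received datagrams:

import java.net.InetSocketAddress
import akka.actor.{ Actor, ActorRef }
import akka.io.{ IO, Udp }

class Listener(nextActor: ActorRef) extends Actor {
  import context.system
  IO(Udp) ! Udp.Bind(self, new InetSocketAddress("localhost", 0))

  def receive = {
    case Udp.Bound(local) ⇒
      context.become(ready(sender())) // sender() is the socket actor
  }

  def ready(socket: ActorRef): Receive = {
    case Udp.Received(data, remote) ⇒ nextActor ! data // hand the payload on
    case Udp.Unbind                 ⇒ socket ! Udp.Unbind
    case Udp.Unbound                ⇒ context.stop(self)
  }
}
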
*/ - final case class Bind(handler: ActorRef, + final case class Bind(handler: ActorRef, localAddress: InetSocketAddress, - options: immutable.Traversable[SocketOption] = Nil) extends Command + options: immutable.Traversable[SocketOption] = Nil) extends Command /** * Send this message to the listener actor that previously sent a [[Bound]] diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala index 1d9a39c..266c5ec 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala @@ -84,10 +84,10 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide * which is restricted to sending to and receiving from the given `remoteAddress`. * All received datagrams will be sent to the designated `handler` actor. */ - final case class Connect(handler: ActorRef, + final case class Connect(handler: ActorRef, remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil) extends Command + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil) extends Command /** * Send this message to a connection actor (which had previously sent the @@ -176,20 +176,20 @@ object UdpConnectedMessage { * which is restricted to sending to and receiving from the given `remoteAddress`. * All received datagrams will be sent to the designated `handler` actor. */ - def connect(handler: ActorRef, + def connect(handler: ActorRef, remoteAddress: InetSocketAddress, - localAddress: InetSocketAddress, - options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options) + localAddress: InetSocketAddress, + options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options) /** * Connect without specifying the `localAddress`. */ - def connect(handler: ActorRef, + def connect(handler: ActorRef, remoteAddress: InetSocketAddress, - options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options) + options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options) /** * Connect without specifying the `localAddress` or `options`. 
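
The connection-oriented UDP variant is used analogously; a sketch of the Connect message above, assuming an illustrative ConnectedClient actor that echoes strings to the fixed peer:

import java.net.InetSocketAddress
import akka.actor.{ Actor, ActorRef }
import akka.io.{ IO, UdpConnected }
import akka.util.ByteString

class ConnectedClient(remote: InetSocketAddress) extends Actor {
  import context.system
  IO(UdpConnected) ! UdpConnected.Connect(self, remote)

  def receive = {
    case UdpConnected.Connected ⇒
      context.become(ready(sender())) // sender() is the connection actor
  }

  def ready(connection: ActorRef): Receive = {
    case UdpConnected.Received(data) ⇒ context.parent ! data // only the peer's datagrams arrive here
    case msg: String                 ⇒ connection ! UdpConnected.Send(ByteString(msg))
    case UdpConnected.Disconnected   ⇒ context.stop(self)
  }
}
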
*/ - def connect(handler: ActorRef, + def connect(handler: ActorRef, remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil) /** diff --git a/akka-actor/src/main/scala/akka/io/UdpConnection.scala b/akka-actor/src/main/scala/akka/io/UdpConnection.scala index 0f3051d..5adb9ad 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnection.scala @@ -18,10 +18,10 @@ import akka.io.UdpConnected._ /** * INTERNAL API */ -private[io] class UdpConnection(udpConn: UdpConnectedExt, +private[io] class UdpConnection(udpConn: UdpConnectedExt, channelRegistry: ChannelRegistry, - commander: ActorRef, - connect: Connect) + commander: ActorRef, + connect: Connect) extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import connect._ diff --git a/akka-actor/src/main/scala/akka/io/UdpListener.scala b/akka-actor/src/main/scala/akka/io/UdpListener.scala index 239f572..5879952 100644 --- a/akka-actor/src/main/scala/akka/io/UdpListener.scala +++ b/akka-actor/src/main/scala/akka/io/UdpListener.scala @@ -18,10 +18,10 @@ import akka.io.Udp._ /** * INTERNAL API */ -private[io] class UdpListener(val udp: UdpExt, +private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, - bindCommander: ActorRef, - bind: Bind) + bindCommander: ActorRef, + bind: Bind) extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import udp.bufferPool diff --git a/akka-actor/src/main/scala/akka/io/UdpSender.scala b/akka-actor/src/main/scala/akka/io/UdpSender.scala index a52bfc4..273fbd8 100644 --- a/akka-actor/src/main/scala/akka/io/UdpSender.scala +++ b/akka-actor/src/main/scala/akka/io/UdpSender.scala @@ -14,10 +14,10 @@ import akka.actor._ /** * INTERNAL API */ -private[io] class UdpSender(val udp: UdpExt, +private[io] class UdpSender(val udp: UdpExt, channelRegistry: ChannelRegistry, - commander: ActorRef, - options: immutable.Traversable[SocketOption]) + commander: ActorRef, + options: immutable.Traversable[SocketOption]) extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] { val channel = { diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala index f575148..8541459 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffOnRestartSupervisor.scala @@ -16,12 +16,12 @@ import akka.actor.SupervisorStrategy._ */ private class BackoffOnRestartSupervisor( val childProps: Props, - val childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - val reset: BackoffReset, - randomFactor: Double, - strategy: OneForOneStrategy) + val childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + val reset: BackoffReset, + randomFactor: Double, + strategy: OneForOneStrategy) extends Actor with HandleBackoff with ActorLogging { diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala index 36a5dca..d7af6f7 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala @@ -70,10 +70,10 @@ object Backoff { * In order to skip this additional delay pass in `0`. 
*/ def onFailure( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double): BackoffOptions = BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) @@ -131,10 +131,10 @@ object Backoff { * In order to skip this additional delay pass in `0`. */ def onStop( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double): BackoffOptions = BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) } @@ -183,14 +183,14 @@ trait BackoffOptions { } private final case class BackoffOptionsImpl( - backoffType: BackoffType = RestartImpliesFailure, - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - reset: Option[BackoffReset] = None, - supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions { + backoffType: BackoffType = RestartImpliesFailure, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + reset: Option[BackoffReset] = None, + supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions { val backoffReset = reset.getOrElse(AutoReset(minBackoff)) diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala index aa19dd0..0f3426d 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala @@ -37,10 +37,10 @@ object BackoffSupervisor { * In order to skip this additional delay pass in `0`. 
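
A usage sketch for the props factory realigned just below (EchoActor and the chosen values are illustrative): the supervisor restarts its child with exponentially growing delays between 3 and 30 seconds, plus up to 20% random jitter.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.BackoffSupervisor
import scala.concurrent.duration._

class EchoActor extends Actor {
  def receive = { case msg ⇒ sender() ! msg }
}

object BackoffExample extends App {
  val system = ActorSystem("example")

  val supervisorProps = BackoffSupervisor.props(
    Props[EchoActor],
    childName = "myEcho",
    minBackoff = 3.seconds,
    maxBackoff = 30.seconds,
    randomFactor = 0.2) // adds up to 20% jitter to each delay

  system.actorOf(supervisorProps, name = "echoSupervisor")
}
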
*/ def props( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double): Props = { propsWithSupervisorStrategy(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy) } @@ -66,12 +66,12 @@ object BackoffSupervisor { * in the child */ def propsWithSupervisorStrategy( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double, - strategy: SupervisorStrategy): Props = { + strategy: SupervisorStrategy): Props = { require(minBackoff > Duration.Zero, "minBackoff must be > 0") require(maxBackoff >= minBackoff, "maxBackoff must be >= minBackoff") require(0.0 <= randomFactor && randomFactor <= 1.0, "randomFactor must be between 0.0 and 1.0") @@ -135,7 +135,7 @@ object BackoffSupervisor { private[akka] final case object StartChild extends DeadLetterSuppression - // not final for binary compatibility with 2.4.1 + // not final for binary compatibility with 2.4.1 private[akka] case class ResetRestartCount(current: Int) extends DeadLetterSuppression /** @@ -145,8 +145,8 @@ object BackoffSupervisor { */ private[akka] def calculateDelay( restartCount: Int, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double): FiniteDuration = { val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor if (restartCount >= 30) // Duration overflow protection (> 100 years) @@ -166,12 +166,12 @@ object BackoffSupervisor { */ final class BackoffSupervisor( val childProps: Props, - val childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - val reset: BackoffReset, - randomFactor: Double, - strategy: SupervisorStrategy) + val childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + val reset: BackoffReset, + randomFactor: Double, + strategy: SupervisorStrategy) extends Actor with HandleBackoff { import BackoffSupervisor._ @@ -192,20 +192,20 @@ final class BackoffSupervisor( // for binary compatibility with 2.4.1 def this( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, supervisorStrategy: SupervisorStrategy) = this(childProps, childName, minBackoff, maxBackoff, AutoReset(minBackoff), randomFactor, supervisorStrategy) // for binary compatibility with 2.4.0 def this( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, randomFactor: Double) = this(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy) diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 2d83675..9e0a52c 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -515,5 +515,5 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite */ class 
CircuitBreakerOpenException( val remainingDuration: FiniteDuration, - message: String = "Circuit Breaker is open; calls are failing fast") + message: String = "Circuit Breaker is open; calls are failing fast") extends AkkaException(message) with NoStackTrace diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala index e7210b7..6f81cd1 100644 --- a/akka-actor/src/main/scala/akka/routing/Balancing.scala +++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala @@ -66,9 +66,9 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic { */ @SerialVersionUID(1L) final case class BalancingPool( - override val nrOfInstances: Int, + override val nrOfInstances: Int, override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Pool { def this(config: Config) = diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala index fbc54e4..b71f315 100644 --- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala +++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala @@ -58,8 +58,8 @@ final class BroadcastRoutingLogic extends RoutingLogic { final case class BroadcastPool( override val nrOfInstances: Int, override val resizer: Option[Resizer] = None, override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) extends Pool with PoolOverrideUnsetConfig[BroadcastPool] { def this(config: Config) = @@ -118,8 +118,8 @@ final case class BroadcastPool( */ @SerialVersionUID(1L) final case class BroadcastGroup( - override val paths: immutable.Iterable[String], - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + override val paths: immutable.Iterable[String], + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group { def this(config: Config) = diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 69671a1..5398dab 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -39,7 +39,7 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v */ def :+(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) - new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) -> node) }), + new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) → node) }), virtualNodesFactor) } @@ -115,7 +115,7 @@ object ConsistentHash { node ← nodes nodeHash = hashFor(node.toString) vnode ← 1 to virtualNodesFactor - } yield (concatenateNodeHash(nodeHash, vnode) -> node)), + } yield (concatenateNodeHash(nodeHash, vnode) → node)), virtualNodesFactor) } diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala index 1139fc1..e0ba2b1 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala +++ 
diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala
index e7210b7..6f81cd1 100644
--- a/akka-actor/src/main/scala/akka/routing/Balancing.scala
+++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala
@@ -66,9 +66,9 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic {
  */
 @SerialVersionUID(1L)
 final case class BalancingPool(
-  override val nrOfInstances: Int,
+  override val nrOfInstances: Int,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Pool {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
index fbc54e4..b71f315 100644
--- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala
+++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala
@@ -58,8 +58,8 @@ final class BroadcastRoutingLogic extends RoutingLogic {
 final case class BroadcastPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[BroadcastPool] {
   def this(config: Config) =
@@ -118,8 +118,8 @@ final case class BroadcastPool(
  */
 @SerialVersionUID(1L)
 final case class BroadcastGroup(
-  override val paths: immutable.Iterable[String],
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val paths: immutable.Iterable[String],
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Group {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
index 69671a1..5398dab 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala
@@ -39,7 +39,7 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
    */
  def :+(node: T): ConsistentHash[T] = {
    val nodeHash = hashFor(node.toString)
-    new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) -> node) }),
+    new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) → node) }),
      virtualNodesFactor)
  }
@@ -115,7 +115,7 @@ object ConsistentHash {
      node ← nodes
      nodeHash = hashFor(node.toString)
      vnode ← 1 to virtualNodesFactor
-    } yield (concatenateNodeHash(nodeHash, vnode) -> node)),
+    } yield (concatenateNodeHash(nodeHash, vnode) → node)),
      virtualNodesFactor)
  }
diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
index 1139fc1..e0ba2b1 100644
--- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
+++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala
@@ -135,9 +135,9 @@ object ConsistentHashingRoutingLogic {
  */
 @SerialVersionUID(1L)
 final case class ConsistentHashingRoutingLogic(
-  system: ActorSystem,
-  virtualNodesFactor: Int = 0,
-  hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
+  system: ActorSystem,
+  virtualNodesFactor: Int = 0,
+  hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
   extends RoutingLogic {
   import ConsistentHashingRouter._
@@ -257,13 +257,13 @@ final case class ConsistentHashingRoutingLogic(
  */
 @SerialVersionUID(1L)
 final case class ConsistentHashingPool(
-  override val nrOfInstances: Int,
-  override val resizer: Option[Resizer] = None,
-  val virtualNodesFactor: Int = 0,
-  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
-  override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val nrOfInstances: Int,
+  override val resizer: Option[Resizer] = None,
+  val virtualNodesFactor: Int = 0,
+  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
+  override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] {
   def this(config: Config) =
@@ -345,10 +345,10 @@ final case class ConsistentHashingPool(
  */
 @SerialVersionUID(1L)
 final case class ConsistentHashingGroup(
-  override val paths: immutable.Iterable[String],
-  val virtualNodesFactor: Int = 0,
-  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val paths: immutable.Iterable[String],
+  val virtualNodesFactor: Int = 0,
+  val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Group {
   def this(config: Config) =
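The hashMapping parameter touched above is what makes the consistent-hashing router usable with plain message types. A hedged usage sketch (Get and Cache are invented for illustration):

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.ConsistentHashingPool
    import akka.routing.ConsistentHashingRouter.ConsistentHashMapping

    object HashingSketch extends App {
      final case class Get(key: String) // assumed message type

      class Cache extends Actor {
        var store = Map.empty[String, String]
        def receive = { case Get(key) ⇒ sender() ! store.get(key) }
      }

      // Route every Get for the same key to the same routee; virtualNodesFactor
      // (left at its default here) controls how many points each routee
      // occupies on the hash ring.
      val hashMapping: ConsistentHashMapping = { case Get(key) ⇒ key }

      val system = ActorSystem("example")
      val cache = system.actorOf(
        ConsistentHashingPool(nrOfInstances = 10, hashMapping = hashMapping).props(Props[Cache]),
        "cache")
    }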
diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
index a38e00a..e72a6b1 100644
--- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala
@@ -44,9 +44,9 @@ case object OptimalSizeExploringResizer {
  */
 private[routing] case class ResizeRecord(
   underutilizationStreak: Option[UnderUtilizationStreak] = None,
-  messageCount: Long = 0,
-  totalQueueLength: Int = 0,
-  checkTime: Long = 0)
+  messageCount: Long = 0,
+  totalQueueLength: Int = 0,
+  checkTime: Long = 0)
 /**
  * INTERNAL API
@@ -115,16 +115,16 @@ case object OptimalSizeExploringResizer {
  */
 @SerialVersionUID(1L)
 case class DefaultOptimalSizeExploringResizer(
-  lowerBound: PoolSize = 1,
-  upperBound: PoolSize = 30,
-  chanceOfScalingDownWhenFull: Double = 0.2,
-  actionInterval: Duration = 5.seconds,
-  numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
-  exploreStepSize: Double = 0.1,
-  downsizeRatio: Double = 0.8,
-  downsizeAfterUnderutilizedFor: Duration = 72.hours,
-  explorationProbability: Double = 0.4,
-  weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
+  lowerBound: PoolSize = 1,
+  upperBound: PoolSize = 30,
+  chanceOfScalingDownWhenFull: Double = 0.2,
+  actionInterval: Duration = 5.seconds,
+  numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
+  exploreStepSize: Double = 0.1,
+  downsizeRatio: Double = 0.8,
+  downsizeAfterUnderutilizedFor: Duration = 72.hours,
+  explorationProbability: Double = 0.4,
+  weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
   /**
    * Leave package accessible for testing purpose
    */
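For context, a resizer like the one above is attached to a pool router, which then grows or shrinks its routee count between the bounds. A sketch under the assumption that DefaultOptimalSizeExploringResizer is used as a plain Resizer (parameter values are arbitrary):

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.{ DefaultOptimalSizeExploringResizer, RoundRobinPool }
    import scala.concurrent.duration._

    object ResizerSketch extends App {
      class Worker extends Actor { def receive = { case _ ⇒ () } }

      // Explores nearby pool sizes and settles on the size with the best
      // observed throughput; shrinks after prolonged underutilization.
      val resizer = DefaultOptimalSizeExploringResizer(
        lowerBound = 2,
        upperBound = 20,
        actionInterval = 5.seconds,
        downsizeAfterUnderutilizedFor = 72.hours)

      val system = ActorSystem("example")
      val workers = system.actorOf(
        RoundRobinPool(nrOfInstances = 2, resizer = Some(resizer)).props(Props[Worker]),
        "workers")
    }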
diff --git a/akka-actor/src/main/scala/akka/routing/Random.scala b/akka-actor/src/main/scala/akka/routing/Random.scala
index 7984033..19f00e1 100644
--- a/akka-actor/src/main/scala/akka/routing/Random.scala
+++ b/akka-actor/src/main/scala/akka/routing/Random.scala
@@ -59,8 +59,8 @@ final class RandomRoutingLogic extends RoutingLogic {
 final case class RandomPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[RandomPool] {
   def this(config: Config) =
@@ -119,8 +119,8 @@ final case class RandomPool(
  */
 @SerialVersionUID(1L)
 final case class RandomGroup(
-  override val paths: immutable.Iterable[String],
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val paths: immutable.Iterable[String],
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Group {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala
index cb10d04..f8b6614 100644
--- a/akka-actor/src/main/scala/akka/routing/Resizer.scala
+++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala
@@ -262,13 +262,13 @@ case class DefaultResizer(
  * INTERNAL API
  */
 private[akka] final class ResizablePoolCell(
-  _system: ActorSystemImpl,
-  _ref: InternalActorRef,
-  _routerProps: Props,
+  _system: ActorSystemImpl,
+  _ref: InternalActorRef,
+  _routerProps: Props,
   _routerDispatcher: MessageDispatcher,
-  _routeeProps: Props,
-  _supervisor: InternalActorRef,
-  val pool: Pool)
+  _routeeProps: Props,
+  _supervisor: InternalActorRef,
+  val pool: Pool)
   extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) {
   require(pool.resizer.isDefined, "RouterConfig must be a Pool with defined resizer")
diff --git a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
index 3ae0016..b4be0fb 100644
--- a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala
@@ -67,8 +67,8 @@ final class RoundRobinRoutingLogic extends RoutingLogic {
 final case class RoundRobinPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[RoundRobinPool] {
   def this(config: Config) =
@@ -127,8 +127,8 @@ final case class RoundRobinPool(
  */
 @SerialVersionUID(1L)
 final case class RoundRobinGroup(
-  override val paths: immutable.Iterable[String],
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val paths: immutable.Iterable[String],
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Group {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
index 4c10cab..0cccc6f 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala
@@ -35,12 +35,12 @@ private[akka] object RoutedActorCell {
  * INTERNAL API
  */
 private[akka] class RoutedActorCell(
-  _system: ActorSystemImpl,
-  _ref: InternalActorRef,
-  _routerProps: Props,
+  _system: ActorSystemImpl,
+  _ref: InternalActorRef,
+  _routerProps: Props,
   _routerDispatcher: MessageDispatcher,
-  val routeeProps: Props,
-  _supervisor: InternalActorRef)
+  val routeeProps: Props,
+  _supervisor: InternalActorRef)
   extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) {
   private[akka] val routerConfig = _routerProps.routerConfig
diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
index 4cfade0..3e52b4c 100644
--- a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
+++ b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala
@@ -22,13 +22,13 @@ import akka.dispatch.MessageDispatcher
 * send a message to one (or more) of these actors.
 */
private[akka] class RoutedActorRef(
-  _system: ActorSystemImpl,
-  _routerProps: Props,
+  _system: ActorSystemImpl,
+  _routerProps: Props,
   _routerDispatcher: MessageDispatcher,
-  _routerMailbox: MailboxType,
-  _routeeProps: Props,
-  _supervisor: InternalActorRef,
-  _path: ActorPath)
+  _routerMailbox: MailboxType,
+  _routeeProps: Props,
+  _supervisor: InternalActorRef,
+  _path: ActorPath)
   extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) {
   // verify that a BalancingDispatcher is not used with a Router
diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
index 347493d..6e15344 100644
--- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
+++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala
@@ -3,7 +3,6 @@
  */
 package akka.routing
-
 import scala.collection.immutable
 import akka.ConfigurationException
 import akka.actor.ActorContext
@@ -282,9 +281,9 @@ case object FromConfig extends FromConfig {
    */
  def getInstance = this
  @inline final def apply(
-    resizer: Option[Resizer] = None,
+    resizer: Option[Resizer] = None,
     supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-    routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
+    routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
     new FromConfig(resizer, supervisorStrategy, routerDispatcher)
  @inline final def unapply(fc: FromConfig): Option[String] = Some(fc.routerDispatcher)
@@ -297,9 +296,9 @@ case object FromConfig extends FromConfig {
 * (defaults to default-dispatcher).
 */
@SerialVersionUID(1L)
-class FromConfig(override val resizer: Option[Resizer],
+class FromConfig(override val resizer: Option[Resizer],
   override val supervisorStrategy: SupervisorStrategy,
-  override val routerDispatcher: String) extends Pool {
+  override val routerDispatcher: String) extends Pool {
  def this() =
    this(None, Pool.defaultSupervisorStrategy, Dispatchers.DefaultDispatcherId)
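FromConfig, touched above, is the router marker that defers all routing settings to configuration. A hedged sketch of how it is typically wired up (the deployment path and Worker actor are assumptions):

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.FromConfig

    object FromConfigSketch extends App {
      class Worker extends Actor { def receive = { case _ ⇒ () } }

      // application.conf (assumed):
      //   akka.actor.deployment {
      //     /workers {
      //       router = round-robin-pool
      //       nr-of-instances = 5
      //     }
      //   }
      val system = ActorSystem("example")
      // Router type and size come from configuration rather than code:
      val workers = system.actorOf(FromConfig.props(Props[Worker]), "workers")
    }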
diff --git a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
index 0a79e3f..75d2e3b 100644
--- a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
+++ b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala
@@ -97,10 +97,10 @@ private[akka] final case class ScatterGatherFirstCompletedRoutees(
 @SerialVersionUID(1L)
 final case class ScatterGatherFirstCompletedPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
-  within: FiniteDuration,
+  within: FiniteDuration,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] {
   def this(config: Config) =
@@ -165,9 +165,9 @@ final case class ScatterGatherFirstCompletedPool(
  */
 @SerialVersionUID(1L)
 final case class ScatterGatherFirstCompletedGroup(
-  override val paths: immutable.Iterable[String],
-  within: FiniteDuration,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
+  override val paths: immutable.Iterable[String],
+  within: FiniteDuration,
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
   extends Group {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
index ae7c027..c073385 100644
--- a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
+++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala
@@ -45,11 +45,11 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
   // 4. An ActorRef with unknown mailbox size that isn't processing anything
   // 5. An ActorRef with a known mailbox size
   // 6. An ActorRef without any messages
-  @tailrec private def selectNext(targets: immutable.IndexedSeq[Routee],
-    proposedTarget: Routee = NoRoutee,
-    currentScore: Long = Long.MaxValue,
-    at: Int = 0,
-    deep: Boolean = false): Routee = {
+  @tailrec private def selectNext(targets: immutable.IndexedSeq[Routee],
+    proposedTarget: Routee = NoRoutee,
+    currentScore: Long = Long.MaxValue,
+    at: Int = 0,
+    deep: Boolean = false): Routee = {
     if (targets.isEmpty)
       NoRoutee
     else if (at >= targets.size) {
@@ -174,8 +174,8 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
 final case class SmallestMailboxPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[SmallestMailboxPool] {
   def this(config: Config) =
diff --git a/akka-actor/src/main/scala/akka/routing/TailChopping.scala b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
index c2d5d59..213cca5 100644
--- a/akka-actor/src/main/scala/akka/routing/TailChopping.scala
+++ b/akka-actor/src/main/scala/akka/routing/TailChopping.scala
@@ -142,11 +142,11 @@ private[akka] final case class TailChoppingRoutees(
 @SerialVersionUID(1L)
 final case class TailChoppingPool(
   override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
-  within: FiniteDuration,
-  interval: FiniteDuration,
+  within: FiniteDuration,
+  interval: FiniteDuration,
   override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
-  override val usePoolDispatcher: Boolean = false)
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
+  override val usePoolDispatcher: Boolean = false)
   extends Pool with PoolOverrideUnsetConfig[TailChoppingPool] {
   def this(config: Config) =
@@ -227,10 +227,10 @@ final case class TailChoppingPool(
  * router management messages
  */
 final case class TailChoppingGroup(
-  override val paths: immutable.Iterable[String],
-  within: FiniteDuration,
-  interval: FiniteDuration,
-  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
+  override val paths: immutable.Iterable[String],
+  within: FiniteDuration,
+  interval: FiniteDuration,
+  override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
   def this(config: Config) =
     this(
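The within/interval pair on the tail-chopping router above deserves a note. A hedged usage sketch (Worker is an assumed routee):

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.routing.TailChoppingPool
    import scala.concurrent.duration._

    object TailChoppingSketch extends App {
      class Worker extends Actor { def receive = { case msg ⇒ sender() ! msg } }

      // Ask one routee; if no reply arrives within `interval`, involve the next
      // routee as well, and so on. The first reply wins and the rest are
      // discarded; `within` bounds the whole exchange. Redundant work is
      // traded for a shorter latency tail.
      val system = ActorSystem("example")
      val chopper = system.actorOf(
        TailChoppingPool(nrOfInstances = 5, within = 10.seconds, interval = 20.millis)
          .props(Props[Worker]),
        "chopper")
    }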
diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
index c59492f..e526d67 100644
--- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala
+++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala
@@ -35,7 +35,7 @@ object Serialization {
   private final def configToMap(path: String): Map[String, String] = {
     import scala.collection.JavaConverters._
-    config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k -> v.toString) }
+    config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k → v.toString) }
   }
 }
@@ -194,7 +194,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
   * loading is performed by the system’s [[akka.actor.DynamicAccess]].
   */
  def serializerOf(serializerFQN: String): Try[Serializer] =
-    system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] -> system)) recoverWith {
+    system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] → system)) recoverWith {
      case _: NoSuchMethodException ⇒ system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, Nil)
    }
@@ -203,7 +203,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
   * By default always contains the following mapping: "java" -> akka.serialization.JavaSerializer
   */
  private val serializers: Map[String, Serializer] =
-    for ((k: String, v: String) ← settings.Serializers) yield k -> serializerOf(v).get
+    for ((k: String, v: String) ← settings.Serializers) yield k → serializerOf(v).get
  /**
   * bindings is a Seq of tuple representing the mapping from Class to Serializer.
@@ -244,7 +244,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
   * Maps from a Serializer Identity (Int) to a Serializer instance (optimization)
   */
  val serializerByIdentity: Map[Int, Serializer] =
-    Map(NullSerializer.identifier -> NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
+    Map(NullSerializer.identifier → NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
  private def shouldWarnAboutJavaSerializer(serializedClass: Class[_], serializer: Serializer) =
    settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage") &&
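As serializerOf above shows, a serializer class is instantiated reflectively, first trying a constructor that takes an ExtendedActorSystem and then a no-arg one. A sketch of a serializer that would satisfy the first form (the identifier and config names are assumptions):

    import akka.actor.ExtendedActorSystem
    import akka.serialization.Serializer

    class StringSerializer(system: ExtendedActorSystem) extends Serializer {
      override def identifier: Int = 99999 // assumed-free, must be unique
      override def includeManifest: Boolean = false
      override def toBinary(o: AnyRef): Array[Byte] = o.toString.getBytes("UTF-8")
      override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef =
        new String(bytes, "UTF-8")
    }

    // application.conf (assumed):
    //   akka.actor.serializers            { my-string = "com.example.StringSerializer" }
    //   akka.actor.serialization-bindings { "java.lang.String" = my-string }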
diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala
index 51286f7..87db921 100644
--- a/akka-actor/src/main/scala/akka/util/BoxedType.scala
+++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala
@@ -7,15 +7,15 @@ object BoxedType {
   import java.{ lang ⇒ jl }
   private val toBoxed = Map[Class[_], Class[_]](
-    classOf[Boolean] -> classOf[jl.Boolean],
-    classOf[Byte] -> classOf[jl.Byte],
-    classOf[Char] -> classOf[jl.Character],
-    classOf[Short] -> classOf[jl.Short],
-    classOf[Int] -> classOf[jl.Integer],
-    classOf[Long] -> classOf[jl.Long],
-    classOf[Float] -> classOf[jl.Float],
-    classOf[Double] -> classOf[jl.Double],
-    classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
+    classOf[Boolean] → classOf[jl.Boolean],
+    classOf[Byte] → classOf[jl.Byte],
+    classOf[Char] → classOf[jl.Character],
+    classOf[Short] → classOf[jl.Short],
+    classOf[Int] → classOf[jl.Integer],
+    classOf[Long] → classOf[jl.Long],
+    classOf[Float] → classOf[jl.Float],
+    classOf[Double] → classOf[jl.Double],
+    classOf[Unit] → classOf[scala.runtime.BoxedUnit])
   final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c
 }
diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala
index 9e35287..8723ac5 100644
--- a/akka-actor/src/main/scala/akka/util/ByteString.scala
+++ b/akka-actor/src/main/scala/akka/util/ByteString.scala
@@ -357,7 +357,7 @@ object ByteString {
   private[akka] object Companion {
     private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings).
-      map(x ⇒ x.SerializationIdentity -> x).toMap.
+      map(x ⇒ x.SerializationIdentity → x).toMap.
       withDefault(x ⇒ throw new IllegalArgumentException("Invalid serialization id " + x))
     def apply(from: Byte): Companion = companionMap(from)
diff --git a/akka-actor/src/main/scala/akka/util/LineNumbers.scala b/akka-actor/src/main/scala/akka/util/LineNumbers.scala
index 3aa1ea0..d9bb9f9 100644
--- a/akka-actor/src/main/scala/akka/util/LineNumbers.scala
+++ b/akka-actor/src/main/scala/akka/util/LineNumbers.scala
@@ -187,7 +187,7 @@ object LineNumbers {
     val cl = c.getClassLoader
     val r = cl.getResourceAsStream(resource)
     if (debug) println(s"LNB: resource '$resource' resolved to stream $r")
-    Option(r).map(_ -> None)
+    Option(r).map(_ → None)
   }
   private def getStreamForLambda(l: AnyRef): Option[(InputStream, Some[String])] =
@@ -269,7 +269,7 @@ object LineNumbers {
     val count = d.readUnsignedShort()
     if (debug) println(s"LNB: reading $count methods")
     if (c.contains("Code") && c.contains("LineNumberTable")) {
-      (1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue -> 0) {
+      (1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue → 0) {
         case ((low, high), (start, end)) ⇒ (Math.min(low, start), Math.max(high, end))
       } match {
         case (Int.MaxValue, 0) ⇒ None
@@ -282,10 +282,10 @@ object LineNumbers {
     }
   }
-  private def readMethod(d: DataInputStream,
-    codeTag: Int,
+  private def readMethod(d: DataInputStream,
+    codeTag: Int,
     lineNumberTableTag: Int,
-    filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
+    filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
     skip(d, 2) // access flags
     val name = d.readUnsignedShort() // name
     skip(d, 2) // signature
@@ -315,7 +315,7 @@ object LineNumbers {
       skip(d, 2) // start PC
       d.readUnsignedShort() // finally: the line number
     }
-    Some(lines.min -> lines.max)
+    Some(lines.min → lines.max)
   }
 }
 if (debug) println(s"LNB: nested attributes yielded: $possibleLines")
diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
index 0ba6d45..dc86cf2 100644
--- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
+++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala
@@ -127,7 +127,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[
     if (!found) {
       val v = values + value
       val n = new Nonroot(root, key, v)
-      integrate(n) ++ n.innerAddValue(key, value) :+ (key -> v)
+      integrate(n) ++ n.innerAddValue(key, value) :+ (key → v)
     } else ch
   }
diff --git a/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala b/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala
index 44e5856..ead5204 100644
--- a/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala
+++ b/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala
@@ -9,10 +9,10 @@ object BenchRunner {
   import scala.collection.JavaConversions._
   val args2 = args.toList.flatMap {
-    case "quick" => "-i 1 -wi 1 -f1 -t1".split(" ").toList
-    case "full" => "-i 10 -wi 4 -f3 -t1".split(" ").toList
-    case "jitwatch" => "-jvmArgs=-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation" :: Nil
-    case other => other :: Nil
+    case "quick" ⇒ "-i 1 -wi 1 -f1 -t1".split(" ").toList
+    case "full" ⇒ "-i 10 -wi 4 -f3 -t1".split(" ").toList
+    case "jitwatch" ⇒ "-jvmArgs=-XX:+UnlockDiagnosticVMOptions -XX:+TraceClassLoading -XX:+LogCompilation" :: Nil
+    case other ⇒ other :: Nil
   }
   val opts = new CommandLineOptions(args2: _*)
@@ -20,7 +20,7 @@ object BenchRunner {
   val report = results.map { result: RunResult ⇒
     val bench = result.getParams.getBenchmark
-    val params = result.getParams.getParamsKeys.map(key => s"$key=${result.getParams.getParam(key)}").mkString("_")
+    val params = result.getParams.getParamsKeys.map(key ⇒ s"$key=${result.getParams.getParam(key)}").mkString("_")
     val score = result.getAggregatedResult.getPrimaryResult.getScore.round
     val unit = result.getAggregatedResult.getPrimaryResult.getScoreUnit
     s"\t${bench}_${params}\t$score\t$unit"
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
index 3dd2b61..ae70d2a 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala
@@ -11,11 +11,8 @@ import scala.concurrent.Await
 /*
 regex checking: [info] a.a.ActorCreationBenchmark.synchronousStarting ss 120000 28.285 0.481 us
-
 hand checking: [info] a.a.ActorCreationBenchmark.synchronousStarting ss 120000 21.496 0.502 us
-
-
 */
 @State(Scope.Benchmark)
 @BenchmarkMode(Array(Mode.SingleShotTime))
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorPathValidationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorPathValidationBenchmark.scala
index f0295f5..bd6b8ca 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ActorPathValidationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorPathValidationBenchmark.scala
@@ -11,7 +11,7 @@ import org.openjdk.jmh.annotations.Fork
 import org.openjdk.jmh.annotations.Measurement
 import org.openjdk.jmh.annotations.Mode
 import org.openjdk.jmh.annotations.OutputTimeUnit
-import org.openjdk.jmh.annotations.{ Scope => JmhScope }
+import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
 import org.openjdk.jmh.annotations.State
 import org.openjdk.jmh.annotations.Warmup
@@ -19,7 +19,7 @@ import org.openjdk.jmh.annotations.Warmup
 [info] Benchmark Mode Samples Score Score error Units
 [info] a.a.ActorPathValidationBenchmark.handLoop7000 thrpt 20 0.070 0.002 ops/us
 [info] a.a.ActorPathValidationBenchmark.old7000 -- blows up (stack overflow) --
-
 [info] a.a.ActorPathValidationBenchmark.handLoopActor_1 thrpt 20 38.825 3.378 ops/us
 [info] a.a.ActorPathValidationBenchmark.oldActor_1 thrpt 20 1.585 0.090 ops/us
 */
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
index b622830..7fd7623 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala
@@ -103,9 +103,9 @@ object ForkJoinActorBenchmark {
   final val messages = 400000
   class Pipe(next: Option[ActorRef]) extends Actor {
     def receive = {
-      case m @ `message` =>
+      case m @ `message` ⇒
         if (next.isDefined) next.get forward m
-      case s @ `stop` =>
+      case s @ `stop` ⇒
         context stop self
         if (next.isDefined) next.get forward s
     }
@@ -113,7 +113,7 @@ object ForkJoinActorBenchmark {
   class PingPong extends Actor {
     var left = messages / 2
     def receive = {
-      case `message` =>
+      case `message` ⇒
         if (left <= 1)
           context stop self
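The akka-bench-jmh sources in this patch all follow the same JMH shape: a @State-annotated class holds the fixture, @Setup manages its lifecycle, and each @Benchmark method is one measured operation. A stripped-down illustration (names and numbers are ours):

    import java.util.concurrent.TimeUnit
    import org.openjdk.jmh.annotations._

    @State(Scope.Benchmark)
    @BenchmarkMode(Array(Mode.Throughput))
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    class ExampleBenchmark {
      var data: Vector[Int] = _

      @Setup(Level.Trial)
      def setup(): Unit = data = Vector.tabulate(1000)(identity)

      @Benchmark
      def sumAll(): Int = data.sum // the measured unit of work
    }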
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
index 84bee09..f315347 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala
@@ -85,7 +85,7 @@ class ScheduleBenchmark {
   @Benchmark
   def multipleScheduleOnce = {
-    val tryWithNext = (1 to to).foldLeft(0.millis -> List[Cancellable]()) {
+    val tryWithNext = (1 to to).foldLeft(0.millis → List[Cancellable]()) {
       case ((interv, c), idx) ⇒
         (interv + interval, scheduler.scheduleOnce(interv) {
           op(idx)
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
index b4ca986..c860f26 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala
@@ -13,7 +13,7 @@ import java.util.concurrent.TimeUnit
 object StashCreationBenchmark {
   class StashingActor extends Actor with Stash {
     def receive = {
-      case msg => sender() ! msg
+      case msg ⇒ sender() ! msg
     }
   }
diff --git a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
index 7c0ae23..41bbc29 100644
--- a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala
@@ -120,12 +120,12 @@ object TellOnlyBenchmark {
   }
   class DroppingDispatcher(
-    _configurator: MessageDispatcherConfigurator,
-    _id: String,
-    _throughput: Int,
-    _throughputDeadlineTime: Duration,
+    _configurator: MessageDispatcherConfigurator,
+    _id: String,
+    _throughput: Int,
+    _throughputDeadlineTime: Duration,
     _executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
-    _shutdownTimeout: FiniteDuration)
+    _shutdownTimeout: FiniteDuration)
     extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
     override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = {
diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
index 6c56afa..3015d4b 100644
--- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetMergeBenchmark.scala
@@ -10,7 +10,7 @@ import org.openjdk.jmh.annotations.Fork
 import org.openjdk.jmh.annotations.Measurement
 import org.openjdk.jmh.annotations.Mode
 import org.openjdk.jmh.annotations.OutputTimeUnit
-import org.openjdk.jmh.annotations.{ Scope => JmhScope }
+import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
 import org.openjdk.jmh.annotations.State
 import org.openjdk.jmh.annotations.Warmup
 import akka.cluster.UniqueAddress
@@ -49,7 +49,7 @@ class ORSetMergeBenchmark {
   @Setup(Level.Trial)
   def setup() {
-    set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) => s.add(nextNode(), "elem" + n))
+    set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) ⇒ s.add(nextNode(), "elem" + n))
     addFromSameNode = set1.add(nodeA, "elem" + set1Size + 1).merge(set1)
     addFromOtherNode = set1.add(nodeB, "elem" + set1Size + 1).merge(set1)
     complex1 = set1.add(nodeB, "a").add(nodeC, "b").remove(nodeD, "elem" + set1Size).merge(set1)
diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
index 4e75b8d..c073409 100644
--- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala
@@ -10,7 +10,7 @@ import org.openjdk.jmh.annotations.Fork
 import org.openjdk.jmh.annotations.Measurement
 import org.openjdk.jmh.annotations.Mode
 import org.openjdk.jmh.annotations.OutputTimeUnit
-import org.openjdk.jmh.annotations.{ Scope => JmhScope }
+import org.openjdk.jmh.annotations.{ Scope ⇒ JmhScope }
 import org.openjdk.jmh.annotations.State
 import org.openjdk.jmh.annotations.Warmup
 import akka.cluster.UniqueAddress
@@ -46,7 +46,7 @@ class VersionVectorBenchmark {
   @Setup(Level.Trial)
   def setup() {
-    vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) => vv + nextNode())
+    vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) ⇒ vv + nextNode())
     vv2 = vv1 + nextNode()
     vv3 = vv1 + nextNode()
     dot1 = VersionVector(nodeA, vv1.versionAt(nodeA))
diff --git a/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
index 6e238ef..56ccded 100644
--- a/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/dispatch/CachingConfigBenchmark.scala
@@ -21,7 +21,7 @@ class CachingConfigBenchmark {
   val deepConfig = ConfigFactory.parseString(deepConfigString)
   val deepCaching = new CachingConfig(deepConfig)
-  @Benchmark def deep_config = deepConfig.hasPath(deepKey)
+  @Benchmark def deep_config = deepConfig.hasPath(deepKey)
   @Benchmark def deep_caching = deepCaching.hasPath(deepKey)
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
index 5b01387..07a68ee 100644
--- a/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/http/HttpBenchmark.scala
@@ -69,7 +69,7 @@ class HttpBenchmark {
   @Benchmark
   def single_request_pool() = {
     import system.dispatcher
-    val (response, id) = Await.result(Source.single(HttpRequest(uri = "/test") -> 42).via(pool).runWith(Sink.head), 1.second)
+    val (response, id) = Await.result(Source.single(HttpRequest(uri = "/test") → 42).via(pool).runWith(Sink.head), 1.second)
     Await.result(Unmarshal(response.get.entity).to[String], 1.second)
   }
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala b/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala
index 8c2fc8c..2d2893e 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala
@@ -8,7 +8,7 @@ import akka.actor.Actor
 /** only as a "the best we could possibly get" baseline, does not persist anything */
 class BaselineActor(respondAfter: Int) extends Actor {
   override def receive = {
-    case n: Int => if (n == respondAfter) sender() ! n
+    case n: Int ⇒ if (n == respondAfter) sender() ! n
   }
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
index 550c5d4..8b8a56c 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala
@@ -18,11 +18,9 @@ import org.openjdk.jmh.annotations._
 # OS: OSX 10.9.3
 # CPU: Intel(R) Core(TM) i7-4850HQ CPU @ 2.30GHz
 # Date: Mon Jul 23 11:07:42 CEST 2014
-
 This bench emulates what we provide with "Processor batching".
 As expected, batching writes is better than writing 1 by 1.
 The important thing though is that there didn't appear to be any "write latency spikes" throughout this bench.
-
 [info] Benchmark Mode Samples Score Score error Units
 [info] a.p.LevelDbBatchingBenchmark.write_1 avgt 20 0.799 0.011 ms/op
 [info] a.p.LevelDbBatchingBenchmark.writeBatch_10 avgt 20 0.117 0.001 ms/op
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
index 517ce82..dc44912 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala
@@ -16,7 +16,6 @@ import scala.concurrent.Await
 # OS: OSX 10.9.3
 # CPU: Intel(R) Core(TM) i7-4850HQ CPU @ 2.30GHz
 # Date: Mon Jun 9 13:22:42 CEST 2014
-
 [info] Benchmark Mode Samples Mean Mean error Units
 [info] a.p.PersistentActorDeferBenchmark.tell_persistAsync_defer_persistAsync_reply thrpt 10 6.858 0.515 ops/ms
 [info] a.p.PersistentActorDeferBenchmark.tell_persistAsync_defer_persistAsync_replyASAP thrpt 10 20.256 2.941 ops/ms
@@ -64,7 +63,7 @@ class PersistentActorDeferBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def tell_persistAsync_defer_persistAsync_reply() {
-    for (i <- data10k) persistAsync_defer.tell(i, probe.ref)
+    for (i ← data10k) persistAsync_defer.tell(i, probe.ref)
     probe.expectMsg(data10k.last)
   }
@@ -72,7 +71,7 @@ class PersistentActorDeferBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def tell_persistAsync_defer_persistAsync_replyASAP() {
-    for (i <- data10k) persistAsync_defer_replyASAP.tell(i, probe.ref)
+    for (i ← data10k) persistAsync_defer_replyASAP.tell(i, probe.ref)
     probe.expectMsg(data10k.last)
   }
@@ -84,12 +83,12 @@ class `persistAsync, defer`(respondAfter: Int) extends PersistentActor {
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      persistAsync(Evt(n)) { e => }
-      deferAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e.i }
+    case n: Int ⇒
+      persistAsync(Evt(n)) { e ⇒ }
+      deferAsync(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e.i }
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
 class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentActor {
@@ -97,12 +96,12 @@ class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentA
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      persistAsync(Evt(n)) { e => }
-      deferAsync(Evt(n)) { e => }
+    case n: Int ⇒
+      persistAsync(Evt(n)) { e ⇒ }
+      deferAsync(Evt(n)) { e ⇒ }
       if (n == respondAfter) sender() ! n
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
index 2d5d239..f433d19 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala
@@ -62,7 +62,7 @@ class PersistentActorThroughputBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def actor_normalActor_reply_baseline() {
-    for (i <- data10k) actor.tell(i, probe.ref)
+    for (i ← data10k) actor.tell(i, probe.ref)
     probe.expectMsg(data10k.last)
   }
@@ -70,7 +70,7 @@ class PersistentActorThroughputBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_persist_reply() {
-    for (i <- data10k) persistPersistentActor.tell(i, probe.ref)
+    for (i ← data10k) persistPersistentActor.tell(i, probe.ref)
     probe.expectMsg(Evt(data10k.last))
   }
@@ -78,7 +78,7 @@ class PersistentActorThroughputBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_persistAsync_reply() {
-    for (i <- data10k) persistAsync1PersistentActor.tell(i, probe.ref)
+    for (i ← data10k) persistAsync1PersistentActor.tell(i, probe.ref)
     probe.expectMsg(Evt(data10k.last))
   }
@@ -86,7 +86,7 @@ class PersistentActorThroughputBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_noPersist_reply() {
-    for (i <- data10k) noPersistPersistentActor.tell(i, probe.ref)
+    for (i ← data10k) noPersistPersistentActor.tell(i, probe.ref)
     probe.expectMsg(Evt(data10k.last))
   }
@@ -94,7 +94,7 @@ class PersistentActorThroughputBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_persistAsync_replyRightOnCommandReceive() {
-    for (i <- data10k) persistAsyncQuickReplyPersistentActor.tell(i, probe.ref)
+    for (i ← data10k) persistAsyncQuickReplyPersistentActor.tell(i, probe.ref)
     probe.expectMsg(Evt(data10k.last))
   }
@@ -106,10 +106,10 @@ class NoPersistPersistentActor(respondAfter: Int) extends PersistentActor {
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int => if (n == respondAfter) sender() ! Evt(n)
+    case n: Int ⇒ if (n == respondAfter) sender() ! Evt(n)
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -118,10 +118,10 @@ class PersistPersistentActor(respondAfter: Int) extends PersistentActor {
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int => persist(Evt(n)) { e => if (e.i == respondAfter) sender() ! e }
+    case n: Int ⇒ persist(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e }
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -130,11 +130,11 @@ class PersistAsyncPersistentActor(respondAfter: Int) extends PersistentActor {
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      persistAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e }
+    case n: Int ⇒
+      persistAsync(Evt(n)) { e ⇒ if (e.i == respondAfter) sender() ! e }
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -143,12 +143,12 @@ class PersistAsyncQuickReplyPersistentActor(respondAfter: Int) extends Persisten
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
+    case n: Int ⇒
       val e = Evt(n)
       if (n == respondAfter) sender() ! e
       persistAsync(e)(identity)
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
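The benchmark actors above exercise three callback styles of PersistentActor. A compact sketch of the distinction (the Evt type mirrors the benchmarks; the actor itself is ours):

    import akka.persistence.PersistentActor

    object PersistSketch {
      final case class Evt(i: Int) // assumed event type

      // persist      - stashes new commands until the event is written (strict ordering),
      // persistAsync - keeps processing commands while the journal round-trips,
      // deferAsync   - writes nothing; its callback runs after preceding persists.
      class Sketch extends PersistentActor {
        override def persistenceId = "sketch"
        override def receiveCommand = {
          case n: Int ⇒
            persistAsync(Evt(n)) { _ ⇒ () }           // journaled, non-blocking
            deferAsync(Evt(n)) { e ⇒ sender() ! e.i } // runs after the persistAsync callback
        }
        override def receiveRecover = { case _ ⇒ () }
      }
    }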
diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
index 59c0d45..bb95b33 100644
--- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala
@@ -61,7 +61,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_persistAsync_with_AtLeastOnceDelivery() {
-    for (i <- 1 to dataCount)
+    for (i ← 1 to dataCount)
       persistAsyncPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
     probe.expectMsg(20.seconds, Evt(dataCount))
   }
@@ -69,7 +69,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_persist_with_AtLeastOnceDelivery() {
-    for (i <- 1 to dataCount)
+    for (i ← 1 to dataCount)
       persistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
     probe.expectMsg(2.minutes, Evt(dataCount))
   }
@@ -77,7 +77,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
   @Benchmark
   @OperationsPerInvocation(10000)
   def persistentActor_noPersist_with_AtLeastOnceDelivery() {
-    for (i <- 1 to dataCount)
+    for (i ← 1 to dataCount)
       noPersistPersistentActorWithAtLeastOnceDelivery.tell(i, probe.ref)
     probe.expectMsg(20.seconds, Evt(dataCount))
   }
@@ -90,28 +90,28 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upS
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      deliver(downStream)(deliveryId => Msg(deliveryId, n))
+    case n: Int ⇒
+      deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
       if (n == respondAfter)
         //switch to wait all message confirmed
         context.become(waitConfirm)
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
       confirmDelivery(deliveryId)
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   val waitConfirm: Actor.Receive = {
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
       confirmDelivery(deliveryId)
       if (numberOfUnconfirmed == 0) {
         upStream ! Evt(respondAfter)
         context.unbecome()
       }
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -122,30 +122,30 @@ class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStr
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      persist(MsgSent(n)) { e =>
-        deliver(downStream)(deliveryId => Msg(deliveryId, n))
+    case n: Int ⇒
+      persist(MsgSent(n)) { e ⇒
+        deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
         if (n == respondAfter)
           //switch to wait all message confirmed
           context.become(waitConfirm)
       }
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
      confirmDelivery(deliveryId)
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   val waitConfirm: Actor.Receive = {
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
      confirmDelivery(deliveryId)
      if (numberOfUnconfirmed == 0) {
        upStream ! Evt(respondAfter)
        context.unbecome()
      }
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -156,30 +156,30 @@ class PersistAsyncPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val
   override def persistenceId: String = self.path.name
   override def receiveCommand = {
-    case n: Int =>
-      persistAsync(MsgSent(n)) { e =>
-        deliver(downStream)(deliveryId => Msg(deliveryId, n))
+    case n: Int ⇒
+      persistAsync(MsgSent(n)) { e ⇒
+        deliver(downStream)(deliveryId ⇒ Msg(deliveryId, n))
         if (n == respondAfter)
           //switch to wait all message confirmed
           context.become(waitConfirm)
       }
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
       confirmDelivery(deliveryId)
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   override def receiveRecover = {
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
   val waitConfirm: Actor.Receive = {
-    case Confirm(deliveryId) =>
+    case Confirm(deliveryId) ⇒
       confirmDelivery(deliveryId)
       if (numberOfUnconfirmed == 0) {
         upStream ! Evt(respondAfter)
         context.unbecome()
       }
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
@@ -197,15 +197,15 @@ class DestinationActor extends Actor {
   var seqNr = 0L
   override def receive = {
-    case n: Int =>
+    case n: Int ⇒
       sender() ! Confirm(n)
-    case Msg(deliveryId, _) =>
+    case Msg(deliveryId, _) ⇒
       seqNr += 1
       if (seqNr % 11 == 0) {
         //drop it
       } else {
         sender() ! Confirm(deliveryId)
       }
-    case _ => // do nothing
+    case _ ⇒ // do nothing
   }
 }
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
index c331f17..c5cf709 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapMergeBenchmark.scala
@@ -4,7 +4,7 @@
 package akka.stream
-import akka.{Done, NotUsed}
+import akka.{ Done, NotUsed }
 import akka.actor.ActorSystem
 import akka.stream.scaladsl._
 import java.util.concurrent.TimeUnit
@@ -33,11 +33,11 @@ class FlatMapMergeBenchmark {
   def setup() {
     val source = NumberOfStreams match {
       // Base line: process NumberOfElements-many elements from a single source without using flatMapMerge
-      case 0 => createSource(NumberOfElements)
+      case 0 ⇒ createSource(NumberOfElements)
       // Stream merging: process NumberOfElements-many elements from n sources, each producing (NumberOfElements/n)-many elements
-      case n =>
+      case n ⇒
         val subSource = createSource(NumberOfElements / n)
-        Source.repeat(()).take(n).flatMapMerge(n, _ => subSource)
+        Source.repeat(()).take(n).flatMapMerge(n, _ ⇒ subSource)
     }
     graph = Source.fromGraph(source).toMat(Sink.ignore)(Keep.right)
   }
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
index 07dff03..bae22be 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala
@@ -127,7 +127,7 @@ class FlowMapBenchmark {
   }
   // source setup
-  private def mkMaps[O, Mat](source: Source[O, Mat], count: Int)(flow: => Graph[FlowShape[O, O], _]): Source[O, Mat] = {
+  private def mkMaps[O, Mat](source: Source[O, Mat], count: Int)(flow: ⇒ Graph[FlowShape[O, O], _]): Source[O, Mat] = {
     var f = source
     for (i ← 1 to count)
       f = f.via(flow)
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
index fe3580d..ad9d2a4 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala
@@ -1,13 +1,12 @@
 package akka.stream
 import akka.event._
-import akka.stream.impl.fusing.{ GraphInterpreterSpecKit, GraphStages}
+import akka.stream.impl.fusing.{ GraphInterpreterSpecKit, GraphStages }
 import akka.stream.impl.fusing.GraphStages
 import akka.stream.impl.fusing.GraphInterpreter.{ DownstreamBoundaryStageLogic, UpstreamBoundaryStageLogic }
 import akka.stream.stage._
 import org.openjdk.jmh.annotations._
-
 import java.util.concurrent.TimeUnit
 @State(Scope.Benchmark)
@@ -36,7 +35,7 @@ class InterpreterBenchmark {
     .connect(identities.last.out, sink)
   // FIXME: This should not be here, this is pure setup overhead
-  for (i <- (0 until identities.size - 1)) {
+  for (i ← (0 until identities.size - 1)) {
     b.connect(identities(i).out, identities(i + 1).in)
   }
diff --git a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
index 0268f17..d21de32 100644
--- a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
+++ b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala
@@ -14,21 +14,21 @@ import scala.concurrent.duration._
 object MaterializationBenchmark {
-  val flowWithMapBuilder = (numOfCombinators: Int) => {
+  val flowWithMapBuilder = (numOfCombinators: Int) ⇒ {
     var source = Source.single(())
-    for (_ <- 1 to numOfCombinators) {
+    for (_ ← 1 to numOfCombinators) {
       source = source.map(identity)
     }
     source.to(Sink.ignore)
   }
-  val graphWithJunctionsBuilder = (numOfJunctions: Int) =>
-    RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
+  val graphWithJunctionsBuilder = (numOfJunctions: Int) ⇒
+    RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
       import GraphDSL.Implicits._
       val broadcast = b.add(Broadcast[Unit](numOfJunctions))
       var outlet = broadcast.out(0)
-      for (i <- 1 until numOfJunctions) {
+      for (i ← 1 until numOfJunctions) {
         val merge = b.add(Merge[Unit](2))
         outlet ~> merge
         broadcast.out(i) ~> merge
@@ -40,36 +40,33 @@ object MaterializationBenchmark {
       ClosedShape
     })
-  val graphWithNestedImportsBuilder = (numOfNestedGraphs: Int) => {
+  val graphWithNestedImportsBuilder = (numOfNestedGraphs: Int) ⇒ {
     var flow: Graph[FlowShape[Unit, Unit], NotUsed] = Flow[Unit].map(identity)
-    for (_ <- 1 to numOfNestedGraphs) {
-      flow = GraphDSL.create(flow) { b ⇒
-        flow ⇒
-          FlowShape(flow.in, flow.out)
+    for (_ ← 1 to numOfNestedGraphs) {
+      flow = GraphDSL.create(flow) { b ⇒ flow ⇒
+        FlowShape(flow.in, flow.out)
       }
     }
-    RunnableGraph.fromGraph(GraphDSL.create(flow) { implicit b ⇒
-      flow ⇒
-        import GraphDSL.Implicits._
-        Source.single(()) ~> flow ~> Sink.ignore
-        ClosedShape
+    RunnableGraph.fromGraph(GraphDSL.create(flow) { implicit b ⇒ flow ⇒
+      import GraphDSL.Implicits._
+      Source.single(()) ~> flow ~> Sink.ignore
+      ClosedShape
     })
   }
-  val graphWithImportedFlowBuilder = (numOfFlows: Int) =>
-    RunnableGraph.fromGraph(GraphDSL.create(Source.single(())) { implicit b ⇒
-      source ⇒
-        import GraphDSL.Implicits._
-        val flow = Flow[Unit].map(identity)
-        var out: Outlet[Unit] = source.out
-        for (i <- 0 until numOfFlows) {
-          val flowShape = b.add(flow)
-          out ~> flowShape
-          out = flowShape.outlet
-        }
-        out ~> Sink.ignore
-        ClosedShape
+  val graphWithImportedFlowBuilder = (numOfFlows: Int) ⇒
+    RunnableGraph.fromGraph(GraphDSL.create(Source.single(())) { implicit b ⇒ source ⇒
+      import GraphDSL.Implicits._
+      val flow = Flow[Unit].map(identity)
+      var out: Outlet[Unit] = source.out
+      for (i ← 0 until numOfFlows) {
+        val flowShape = b.add(flow)
+        out ~> flowShape
+        out = flowShape.outlet
+      }
+      out ~> Sink.ignore
+      ClosedShape
     })
 }
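The builders above all share one GraphDSL pattern. A self-contained sketch of that pattern, with made-up sizes and types:

    import akka.NotUsed
    import akka.stream.ClosedShape
    import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }

    object GraphSketch {
      // Fan out via Broadcast, fan back in via Merge, drain into a Sink.
      val graph: RunnableGraph[NotUsed] =
        RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
          import GraphDSL.Implicits._
          val bcast  = b.add(Broadcast[Int](2))
          val merge  = b.add(Merge[Int](2))
          val double = Flow[Int].map(_ * 2)
          Source(1 to 100) ~> bcast.in
          bcast.out(0) ~> merge.in(0)
          bcast.out(1) ~> double ~> merge.in(1)
          merge.out ~> Sink.ignore
          ClosedShape
        })
    }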
test("mustCreateRequestMessageFromInMessageWithAdditionalHeader") { - val m = sampleInOnly.toRequestMessage(Map("x" -> "y")) - assert(m === CamelMessage("test-in", Map("key-in" -> "val-in", "x" -> "y"))) + val m = sampleInOnly.toRequestMessage(Map("x" → "y")) + assert(m === CamelMessage("test-in", Map("key-in" → "val-in", "x" → "y"))) } test("mustCreateResponseMessageFromInMessageWithAdditionalHeader") { - val m = sampleInOnly.toResponseMessage(Map("x" -> "y")) - assert(m === CamelMessage("test-in", Map("key-in" -> "val-in", "x" -> "y"))) + val m = sampleInOnly.toResponseMessage(Map("x" → "y")) + assert(m === CamelMessage("test-in", Map("key-in" → "val-in", "x" → "y"))) } test("mustCreateResponseMessageFromOutMessageWithAdditionalHeader") { - val m = sampleInOut.toResponseMessage(Map("x" -> "y")) - assert(m === CamelMessage("test-out", Map("key-out" -> "val-out", "x" -> "y"))) + val m = sampleInOut.toResponseMessage(Map("x" → "y")) + assert(m === CamelMessage("test-out", Map("key-out" → "val-out", "x" → "y"))) } test("mustCreateFailureMessageFromExceptionAndInMessageWithAdditionalHeader") { val e1 = sampleInOnly e1.setException(new Exception("test1")) assert(e1.toAkkaCamelException.getMessage === "test1") - val headers = e1.toAkkaCamelException(Map("x" -> "y")).headers + val headers = e1.toAkkaCamelException(Map("x" → "y")).headers assert(headers("key-in") === "val-in") assert(headers("x") === "y") assert(e1.toFailureMessage.cause.getMessage === "test1") - val failureHeaders = e1.toFailureResult(Map("x" -> "y")).headers + val failureHeaders = e1.toFailureResult(Map("x" → "y")).headers assert(failureHeaders("key-in") === "val-in") assert(failureHeaders("x") === "y") @@ -115,11 +115,11 @@ class CamelExchangeAdapterTest extends FunSuite with SharedCamelSystem { val e1 = sampleInOut e1.setException(new Exception("test2")) assert(e1.toAkkaCamelException.getMessage === "test2") - val headers = e1.toAkkaCamelException(Map("x" -> "y")).headers + val headers = e1.toAkkaCamelException(Map("x" → "y")).headers assert(headers("key-out") === "val-out") assert(headers("x") === "y") assert(e1.toFailureMessage.cause.getMessage === "test2") - val failureHeaders = e1.toFailureResult(Map("x" -> "y")).headers + val failureHeaders = e1.toFailureResult(Map("x" → "y")).headers assert(failureHeaders("key-out") === "val-out") assert(failureHeaders("x") === "y") } diff --git a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala index f79ba3c..7bff0f7 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala @@ -25,7 +25,7 @@ class CamelMessageTest extends Matchers with WordSpecLike with SharedCamelSystem message.setExchange(new DefaultExchange(camel.context)) val attachmentToAdd = new DataHandler(new URL("https://another.url")) - CamelMessage.copyContent(new CamelMessage("body", Map("key" -> "baz"), Map("key" -> attachmentToAdd)), message) + CamelMessage.copyContent(new CamelMessage("body", Map("key" → "baz"), Map("key" → attachmentToAdd)), message) assert(message.getBody === "body") assert(message.getHeader("foo") === "bar") diff --git a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala index c48562b..b8edf30 100644 --- a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala @@ -67,8 +67,8 @@ 
class ConcurrentActivationTest extends WordSpec with Matchers with NonSharedCame } val (activatedConsumerNames, activatedProducerNames) = partitionNames(activations) val (deactivatedConsumerNames, deactivatedProducerNames) = partitionNames(deactivations) - assertContainsSameElements(activatedConsumerNames -> deactivatedConsumerNames) - assertContainsSameElements(activatedProducerNames -> deactivatedProducerNames) + assertContainsSameElements(activatedConsumerNames → deactivatedConsumerNames) + assertContainsSameElements(activatedProducerNames → deactivatedProducerNames) } finally { system.eventStream.publish(TestEvent.UnMute(eventFilter)) } @@ -95,7 +95,7 @@ class ConsumerBroadcast(promise: Promise[(Future[List[List[ActorRef]]], Future[L val routee = context.actorOf(Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise), "registrar-" + i) routee.path.toString } - promise.success(Future.sequence(allActivationFutures) -> Future.sequence(allDeactivationFutures)) + promise.success(Future.sequence(allActivationFutures) → Future.sequence(allDeactivationFutures)) broadcaster = Some(context.actorOf(BroadcastGroup(routeePaths).props(), "registrarRouter")) case reg: Any ⇒ diff --git a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala index 85fc931..3c37885 100644 --- a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala @@ -24,31 +24,31 @@ class MessageScalaTest extends FunSuite with Matchers with SharedCamelSystem { } test("mustConvertDoubleHeaderToString") { - val message = CamelMessage("test", Map("test" -> 1.4)) + val message = CamelMessage("test", Map("test" → 1.4)) message.headerAs[String]("test").get should ===("1.4") } test("mustReturnSubsetOfHeaders") { - val message = CamelMessage("test", Map("A" -> "1", "B" -> "2")) - message.headers(Set("B")) should ===(Map("B" -> "2")) + val message = CamelMessage("test", Map("A" → "1", "B" → "2")) + message.headers(Set("B")) should ===(Map("B" → "2")) } test("mustTransformBodyAndPreserveHeaders") { - CamelMessage("a", Map("A" -> "1")).mapBody((body: String) ⇒ body + "b") should ===(CamelMessage("ab", Map("A" -> "1"))) + CamelMessage("a", Map("A" → "1")).mapBody((body: String) ⇒ body + "b") should ===(CamelMessage("ab", Map("A" → "1"))) } test("mustConvertBodyAndPreserveHeaders") { - CamelMessage(1.4, Map("A" -> "1")).withBodyAs[String] should ===(CamelMessage("1.4", Map("A" -> "1"))) + CamelMessage(1.4, Map("A" → "1")).withBodyAs[String] should ===(CamelMessage("1.4", Map("A" → "1"))) } test("mustSetBodyAndPreserveHeaders") { - CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") should ===( - CamelMessage("test2", Map("A" -> "1"))) + CamelMessage("test1", Map("A" → "1")).copy(body = "test2") should ===( + CamelMessage("test2", Map("A" → "1"))) } test("mustSetHeadersAndPreserveBody") { - CamelMessage("test1", Map("A" -> "1")).copy(headers = Map("C" -> "3")) should ===( - CamelMessage("test1", Map("C" -> "3"))) + CamelMessage("test1", Map("A" → "1")).copy(headers = Map("C" → "3")) should ===( + CamelMessage("test1", Map("C" → "3"))) } test("mustBeAbleToReReadStreamCacheBody") { diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index 6106ff7..7dfeb91 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ 
b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -45,9 +45,9 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "01 produce a message and receive normal response" in { val producer = system.actorOf(Props(new TestProducer("direct:producer-test-2", true)), name = "01-direct-producer-2") - val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123")) producer.tell(message, testActor) - expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId -> "123"))) + expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId → "123"))) } "02 produce a message and receive failure response" in { @@ -72,13 +72,13 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk supervisor.tell(Props(new TestProducer("direct:producer-test-2")), testActor) val producer = receiveOne(timeoutDuration).asInstanceOf[ActorRef] - val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { producer.tell(message, testActor) expectMsgPF(timeoutDuration) { case Failure(e: AkkaCamelException) ⇒ e.getMessage should ===("failure") - e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123")) + e.headers should ===(Map(CamelMessage.MessageExchangeId → "123")) } } Await.ready(latch, timeoutDuration) @@ -106,21 +106,21 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "10 produce message to direct:producer-test-3 and receive normal response" in { val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "10-direct-producer-test-3") - val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123")) producer.tell(message, testActor) - expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123"))) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123"))) } "11 produce message to direct:producer-test-3 and receive failure response" in { val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "11-direct-producer-test-3-receive-failure") - val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { producer.tell(message, testActor) expectMsgPF(timeoutDuration) { case Failure(e: AkkaCamelException) ⇒ e.getMessage should ===("failure") - e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123")) + e.headers should ===(Map(CamelMessage.MessageExchangeId → "123")) } } } @@ -128,22 +128,22 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "12 produce message, forward normal response of direct:producer-test-2 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "12-reply-forwarding-target") val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "12-direct-producer-test-2-forwarder") - val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("test", 
Map(CamelMessage.MessageExchangeId → "123")) producer.tell(message, testActor) - expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result"))) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123", "test" → "result"))) } "13 produce message, forward failure response of direct:producer-test-2 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "13-reply-forwarding-target") val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "13-direct-producer-test-2-forwarder-failure") - val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { producer.tell(message, testActor) expectMsgPF(timeoutDuration) { case Failure(e: AkkaCamelException) ⇒ e.getMessage should ===("failure") - e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + e.headers should ===(Map(CamelMessage.MessageExchangeId → "123", "test" → "failure")) } } } @@ -170,23 +170,23 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "16 produce message, forward normal response from direct:producer-test-3 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "16-reply-forwarding-target") val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "16-direct-producer-test-3-to-replying-actor") - val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123")) producer.tell(message, testActor) - expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result"))) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123", "test" → "result"))) } "17 produce message, forward failure response from direct:producer-test-3 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "17-reply-forwarding-target") val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "17-direct-producer-test-3-forward-failure") - val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { producer.tell(message, testActor) expectMsgPF(timeoutDuration) { case Failure(e: AkkaCamelException) ⇒ e.getMessage should ===("failure") - e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + e.headers should ===(Map(CamelMessage.MessageExchangeId → "123", "test" → "failure")) } } } @@ -324,10 +324,10 @@ object ProducerFeatureTest { class ReplyingForwardTarget extends Actor { def receive = { case msg: CamelMessage ⇒ - context.sender() ! (msg.copy(headers = msg.headers + ("test" -> "result"))) + context.sender() ! (msg.copy(headers = msg.headers + ("test" → "result"))) case msg: akka.actor.Status.Failure ⇒ msg.cause match { - case e: AkkaCamelException ⇒ context.sender() ! Status.Failure(new AkkaCamelException(e, e.headers + ("test" -> "failure"))) + case e: AkkaCamelException ⇒ context.sender() ! 
Status.Failure(new AkkaCamelException(e, e.headers + ("test" → "failure"))) } } } diff --git a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala index 0ce10b7..4bd3d03 100644 --- a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala @@ -34,10 +34,10 @@ class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll "produce a message and receive a normal response" in { val producer = system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer") - val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId → "123")) val future = producer.ask(message)(timeout) - val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123")) + val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId → "123")) Await.result(future, timeout) match { case result: CamelMessage ⇒ result should ===(expected) case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected) @@ -48,14 +48,14 @@ class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll "produce a message and receive a failure response" in { val producer = system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer-failure") - val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) + val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId → "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { val future = producer.ask(message)(timeout).failed Await.result(future, timeout) match { case e: AkkaCamelException ⇒ e.getMessage should ===("failure") - e.headers should ===(Map(CamelMessage.MessageExchangeId -> "123")) + e.headers should ===(Map(CamelMessage.MessageExchangeId → "123")) case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected) } } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala index 02c818c..9361a4b 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala @@ -44,7 +44,7 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { * Supervision strategy. 
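 * The strategy class is loaded reflectively; the `classOf[Config] → configuration`
 * pair passed below selects a constructor taking the configuration as its single
 * argument. A sketch of the same mechanism (the FQCN and config value are
 * hypothetical):
 * {{{
 * val strategyTry = system.dynamicAccess.createInstanceFor[SupervisorStrategy](
 *   "com.example.MyStrategy",                  // hypothetical provider class
 *   immutable.Seq(classOf[Config] → myConfig)) // (parameter type, argument) pairs
 * }}}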
*/ private[metrics] val strategy = system.dynamicAccess.createInstanceFor[SupervisorStrategy]( - SupervisorStrategyProvider, immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration)) + SupervisorStrategyProvider, immutable.Seq(classOf[Config] → SupervisorStrategyConfiguration)) .getOrElse { val log: LoggingAdapter = Logging(system, getClass.getName) log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ClusterMetricsStrategy].getName}.") diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala index 9e80a1f..421c582 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala @@ -120,11 +120,11 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS */ @SerialVersionUID(1L) final case class AdaptiveLoadBalancingPool( - metricsSelector: MetricsSelector = MixMetricsSelector, - override val nrOfInstances: Int = 0, + metricsSelector: MetricsSelector = MixMetricsSelector, + override val nrOfInstances: Int = 0, override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) extends Pool { def this(config: Config, dynamicAccess: DynamicAccess) = @@ -200,9 +200,9 @@ final case class AdaptiveLoadBalancingPool( */ @SerialVersionUID(1L) final case class AdaptiveLoadBalancingGroup( - metricsSelector: MetricsSelector = MixMetricsSelector, - override val paths: immutable.Iterable[String] = Nil, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + metricsSelector: MetricsSelector = MixMetricsSelector, + override val paths: immutable.Iterable[String] = Nil, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group { def this(config: Config, dynamicAccess: DynamicAccess) = @@ -217,7 +217,7 @@ final case class AdaptiveLoadBalancingGroup( * sent with [[akka.actor.ActorSelection]] to these paths */ def this(metricsSelector: MetricsSelector, - routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths)) + routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths)) override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths @@ -365,9 +365,9 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) { case (acc, (address, capacity)) ⇒ val (sum, count) = acc(address) - acc + (address -> ((sum + capacity, count + 1))) + acc + (address → ((sum + capacity, count + 1))) }.map { - case (addr, (sum, count)) ⇒ (addr -> sum / count) + case (addr, (sum, count)) ⇒ (addr → sum / count) } } @@ -381,7 +381,7 @@ object MetricsSelector { case "cpu" ⇒ CpuMetricsSelector case "load" ⇒ SystemLoadAverageMetricsSelector case fqn ⇒ - val args = List(classOf[Config] -> config) + val args = List(classOf[Config] → config) dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({ case exception ⇒ throw new IllegalArgumentException( (s"Cannot instantiate metrics-selector [$fqn], " + @@ 
-429,7 +429,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) ⇒ c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) } + capacity map { case (addr, c) ⇒ (addr → math.round((c) / divisor).toInt) } } } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala index 3f8cf45..dd0338e 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala @@ -207,12 +207,12 @@ object StandardMetrics { */ @SerialVersionUID(1L) final case class Cpu( - address: Address, - timestamp: Long, + address: Address, + timestamp: Long, systemLoadAverage: Option[Double], - cpuCombined: Option[Double], - cpuStolen: Option[Double], - processors: Int) { + cpuCombined: Option[Double], + cpuStolen: Option[Double], + processors: Int) { cpuCombined match { case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]") diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala index f33f54d..462c1a9 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala @@ -60,7 +60,7 @@ private[metrics] object MetricsCollector { def create(provider: String) = TryNative { log.debug(s"Trying ${provider}.") system.asInstanceOf[ExtendedActorSystem].dynamicAccess - .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] -> system)).get + .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] → system)).get } val collector = if (useCustom) diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index 15d6f2b..c8c6767 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -122,11 +122,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa Await.result(router ? 
GetRoutees, timeout.duration).asInstanceOf[Routees].routees def receiveReplies(expectedReplies: Int): Map[Address, Int] = { - val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) + val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0) (receiveWhile(5 seconds, messages = expectedReplies) { case Reply(address) ⇒ address }).foldLeft(zero) { - case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) + case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1)) } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index ce0d99b..237bae1 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -63,15 +63,15 @@ class MetricsSelectorSpec extends WordSpec with Matchers { "CapacityMetricsSelector" must { "calculate weights from capacity" in { - val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1) + val capacity = Map(a1 → 0.6, b1 → 0.3, c1 → 0.1) val weights = abstractSelector.weights(capacity) - weights should ===(Map(c1 -> 1, b1 -> 3, a1 -> 6)) + weights should ===(Map(c1 → 1, b1 → 3, a1 → 6)) } "handle low and zero capacity" in { - val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004) + val capacity = Map(a1 → 0.0, b1 → 1.0, c1 → 0.005, d1 → 0.004) val weights = abstractSelector.weights(capacity) - weights should ===(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0)) + weights should ===(Map(a1 → 0, b1 → 100, c1 → 1, d1 → 0)) } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala index 6534b75..aa991f8 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala @@ -92,7 +92,7 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec } else None } } - streamingDataSet ++= changes.map(m ⇒ m.name -> m) + streamingDataSet ++= changes.map(m ⇒ m.name → m) } } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala index 425b484..6f6689b 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala @@ -49,11 +49,11 @@ case class SimpleSigarProvider(location: String = "native") extends SigarProvide * Provide sigar library as static mock. 
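 * A sketch of building the mock with non-default samples; parameter meanings
 * are inferred from the defaults below:
 * {{{
 * val provider = MockitoSigarProvider(
 *   loadAverage = Array(0.9, 0.5, 0.2), // 1/5/15 minute load averages
 *   cpuCombined = 0.4,                  // combined cpu level, 0.0 - 1.0
 *   steps = 10)                         // number of mocked sampling steps
 * }}}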
*/ case class MockitoSigarProvider( - pid: Long = 123, + pid: Long = 123, loadAverage: Array[Double] = Array(0.7, 0.3, 0.1), - cpuCombined: Double = 0.5, - cpuStolen: Double = 0.2, - steps: Int = 5) extends SigarProvider with MockitoSugar { + cpuCombined: Double = 0.5, + cpuStolen: Double = 0.2, + steps: Int = 5) extends SigarProvider with MockitoSugar { import org.hyperic.sigar._ import org.mockito.Mockito._ diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 2b92154..81cc5fe 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -193,11 +193,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, allocationStrategy: ShardAllocationStrategy, handOffStopMessage: Any): ActorRef = { @@ -232,11 +232,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, + typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = { + extractShardId: ShardRegion.ExtractShardId): ActorRef = { val allocationStrategy = new LeastShardAllocationStrategy( settings.tuningParameters.leastShardAllocationRebalanceThreshold, @@ -265,10 +265,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - messageExtractor: ShardRegion.MessageExtractor, + typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + messageExtractor: ShardRegion.MessageExtractor, allocationStrategy: ShardAllocationStrategy, handOffStopMessage: Any): ActorRef = { @@ -301,9 +301,9 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, + typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, messageExtractor: ShardRegion.MessageExtractor): ActorRef = { val allocationStrategy = new LeastShardAllocationStrategy( @@ -333,10 +333,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def startProxy( - typeName: String, - role: Option[String], + typeName: String, + role: Option[String], extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = { + extractShardId: ShardRegion.ExtractShardId): ActorRef = { implicit val timeout = 
system.settings.CreationTimeout val settings = ClusterShardingSettings(system).withRole(role) @@ -363,8 +363,8 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ def startProxy( - typeName: String, - role: Optional[String], + typeName: String, + role: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = { startProxy(typeName, Option(role.orElse(null)), diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala index 82ca284..2931eb5 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala @@ -71,19 +71,19 @@ object ClusterShardingSettings { if (role == "") None else Option(role) class TuningParameters( - val coordinatorFailureBackoff: FiniteDuration, - val retryInterval: FiniteDuration, - val bufferSize: Int, - val handOffTimeout: FiniteDuration, - val shardStartTimeout: FiniteDuration, - val shardFailureBackoff: FiniteDuration, - val entityRestartBackoff: FiniteDuration, - val rebalanceInterval: FiniteDuration, - val snapshotAfter: Int, - val leastShardAllocationRebalanceThreshold: Int, + val coordinatorFailureBackoff: FiniteDuration, + val retryInterval: FiniteDuration, + val bufferSize: Int, + val handOffTimeout: FiniteDuration, + val shardStartTimeout: FiniteDuration, + val shardFailureBackoff: FiniteDuration, + val entityRestartBackoff: FiniteDuration, + val rebalanceInterval: FiniteDuration, + val snapshotAfter: Int, + val leastShardAllocationRebalanceThreshold: Int, val leastShardAllocationMaxSimultaneousRebalance: Int, - val waitingForStateTimeout: FiniteDuration, - val updatingStateTimeout: FiniteDuration) + val waitingForStateTimeout: FiniteDuration, + val updatingStateTimeout: FiniteDuration) } /** @@ -102,12 +102,12 @@ object ClusterShardingSettings { * @param tuningParameters additional tuning parameters, see descriptions in reference.conf */ final class ClusterShardingSettings( - val role: Option[String], - val rememberEntities: Boolean, - val journalPluginId: String, - val snapshotPluginId: String, - val stateStoreMode: String, - val tuningParameters: ClusterShardingSettings.TuningParameters, + val role: Option[String], + val rememberEntities: Boolean, + val journalPluginId: String, + val snapshotPluginId: String, + val stateStoreMode: String, + val tuningParameters: ClusterShardingSettings.TuningParameters, val coordinatorSingletonSettings: ClusterSingletonManagerSettings) extends NoSerializationVerificationNeeded { require(stateStoreMode == "persistence" || stateStoreMode == "ddata", @@ -136,13 +136,13 @@ final class ClusterShardingSettings( def withCoordinatorSingletonSettings(coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings = copy(coordinatorSingletonSettings = coordinatorSingletonSettings) - private def copy(role: Option[String] = role, - rememberEntities: Boolean = rememberEntities, - journalPluginId: String = journalPluginId, - snapshotPluginId: String = snapshotPluginId, - stateStoreMode: String = stateStoreMode, - tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters, - coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings): ClusterShardingSettings = + private 
def copy(role: Option[String] = role, + rememberEntities: Boolean = rememberEntities, + journalPluginId: String = journalPluginId, + snapshotPluginId: String = snapshotPluginId, + stateStoreMode: String = stateStoreMode, + tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters, + coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings): ClusterShardingSettings = new ClusterShardingSettings( role, rememberEntities, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala index 50ca0af..9629079 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala @@ -78,12 +78,12 @@ private[akka] object Shard { * If `settings.rememberEntities` is enabled the `PersistentShard` * subclass is used, otherwise `Shard`. */ - def props(typeName: String, - shardId: ShardRegion.ShardId, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + def props(typeName: String, + shardId: ShardRegion.ShardId, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, handOffStopMessage: Any): Props = { if (settings.rememberEntities) Props(new PersistentShard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)) @@ -103,12 +103,12 @@ private[akka] object Shard { * @see [[ClusterSharding$ ClusterSharding extension]] */ private[akka] class Shard( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + typeName: String, + shardId: ShardRegion.ShardId, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, handOffStopMessage: Any) extends Actor with ActorLogging { import ShardRegion.{ handOffStopperProps, EntityId, Msg, Passivate, ShardInitialized } @@ -299,12 +299,12 @@ private[akka] class Shard( * @see [[ClusterSharding$ ClusterSharding extension]] */ private[akka] class PersistentShard( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + typeName: String, + shardId: ShardRegion.ShardId, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, handOffStopMessage: Any) extends Shard( typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage) with PersistentActor with ActorLogging { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala index 9ad74c5..2129ff2 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala @@ -43,7 +43,7 @@ object ShardCoordinator { */ private[akka] def props(typeName: String, settings: ClusterShardingSettings, allocationStrategy: 
ShardAllocationStrategy, - replicator: ActorRef): Props = + replicator: ActorRef): Props = Props(new DDataShardCoordinator(typeName: String, settings, allocationStrategy, replicator)).withDeploy(Deploy.local) /** @@ -74,7 +74,7 @@ object ShardCoordinator { * @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round */ def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] } /** @@ -90,7 +90,7 @@ object ShardCoordinator { } override final def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { import scala.collection.JavaConverters._ implicit val ec = ExecutionContexts.sameThreadExecutionContext rebalance(currentShardAllocations.asJava, rebalanceInProgress.asJava).map(_.asScala.toSet) @@ -118,7 +118,7 @@ object ShardCoordinator { * @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round */ def rebalance(currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]], - rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]] + rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]] } private val emptyRebalanceResult = Future.successful(Set.empty[ShardId]) @@ -142,7 +142,7 @@ object ShardCoordinator { } override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { if (rebalanceInProgress.size < maxSimultaneousRebalance) { val (regionWithLeastShards, leastShards) = currentShardAllocations.minBy { case (_, v) ⇒ v.size } val mostShards = currentShardAllocations.collect { @@ -255,10 +255,10 @@ object ShardCoordinator { // region for each shard shards: Map[ShardId, ActorRef] = Map.empty, // shards for each region - regions: Map[ActorRef, Vector[ShardId]] = Map.empty, - regionProxies: Set[ActorRef] = Set.empty, - unallocatedShards: Set[ShardId] = Set.empty, - rememberEntities: Boolean = false) extends ClusterShardingSerializable { + regions: Map[ActorRef, Vector[ShardId]] = Map.empty, + regionProxies: Set[ActorRef] = Set.empty, + unallocatedShards: Set[ShardId] = Set.empty, + rememberEntities: Boolean = false) extends ClusterShardingSerializable { def withRememberEntities(enabled: Boolean): State = { if (enabled) @@ -550,7 +550,7 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti implicit val timeout: Timeout = waitMax Future.sequence(aliveRegions.map { regionActor ⇒ (regionActor ? 
ShardRegion.GetShardRegionStats).mapTo[ShardRegion.ShardRegionStats] - .map(stats ⇒ regionActor -> stats) + .map(stats ⇒ regionActor → stats) }).map { allRegionStats ⇒ ShardRegion.ClusterShardingStats(allRegionStats.map { case (region, stats) ⇒ @@ -559,7 +559,7 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) cluster.selfAddress else regionAddress - address -> stats + address → stats }.toMap) }.recover { case x: AskTimeoutException ⇒ ShardRegion.ClusterShardingStats(Map.empty) @@ -808,7 +808,7 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett */ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, allocationStrategy: ShardCoordinator.ShardAllocationStrategy, - replicator: ActorRef) + replicator: ActorRef) extends ShardCoordinator(typeName, settings, allocationStrategy) with Stash { import ShardCoordinator.Internal._ import akka.cluster.ddata.Replicator.Update diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index a77d8b7..def1e0e 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -33,12 +33,12 @@ object ShardRegion { * Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor. */ private[akka] def props( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - coordinatorPath: String, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + coordinatorPath: String, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, handOffStopMessage: Any): Props = Props(new ShardRegion(typeName, Some(entityProps), settings, coordinatorPath, extractEntityId, extractShardId, handOffStopMessage)).withDeploy(Deploy.local) @@ -49,11 +49,11 @@ object ShardRegion { * when using it in proxy only mode. 
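 * In proxy only mode the region is started with `entityProps = None`: it hosts
 * no entities itself and only forwards extracted messages to shard homes
 * resolved via the coordinator. A sketch of the public entry point (the type
 * name and the extractors are assumed to be defined elsewhere):
 * {{{
 * val proxy = ClusterSharding(system).startProxy(
 *   "counter", None, extractEntityId, extractShardId)
 * }}}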
*/ private[akka] def proxyProps( - typeName: String, - settings: ClusterShardingSettings, + typeName: String, + settings: ClusterShardingSettings, coordinatorPath: String, extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): Props = + extractShardId: ShardRegion.ExtractShardId): Props = Props(new ShardRegion(typeName, None, settings, coordinatorPath, extractEntityId, extractShardId, PoisonPill)) .withDeploy(Deploy.local) @@ -341,12 +341,12 @@ object ShardRegion { * @see [[ClusterSharding$ ClusterSharding extension]] */ class ShardRegion( - typeName: String, - entityProps: Option[Props], - settings: ClusterShardingSettings, - coordinatorPath: String, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, + typeName: String, + entityProps: Option[Props], + settings: ClusterShardingSettings, + coordinatorPath: String, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, handOffStopMessage: Any) extends Actor with ActorLogging { import ShardCoordinator.Internal._ diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala index e9071c6..48d297d 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala @@ -64,33 +64,33 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private val ShardStatsManifest = "DB" private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - EntityStateManifest -> entityStateFromBinary, - EntityStartedManifest -> entityStartedFromBinary, - EntityStoppedManifest -> entityStoppedFromBinary, - - CoordinatorStateManifest -> coordinatorStateFromBinary, - ShardRegionRegisteredManifest -> { bytes ⇒ ShardRegionRegistered(actorRefMessageFromBinary(bytes)) }, - ShardRegionProxyRegisteredManifest -> { bytes ⇒ ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) }, - ShardRegionTerminatedManifest -> { bytes ⇒ ShardRegionTerminated(actorRefMessageFromBinary(bytes)) }, - ShardRegionProxyTerminatedManifest -> { bytes ⇒ ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) }, - ShardHomeAllocatedManifest -> shardHomeAllocatedFromBinary, - ShardHomeDeallocatedManifest -> { bytes ⇒ ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) }, - - RegisterManifest -> { bytes ⇒ Register(actorRefMessageFromBinary(bytes)) }, - RegisterProxyManifest -> { bytes ⇒ RegisterProxy(actorRefMessageFromBinary(bytes)) }, - RegisterAckManifest -> { bytes ⇒ RegisterAck(actorRefMessageFromBinary(bytes)) }, - GetShardHomeManifest -> { bytes ⇒ GetShardHome(shardIdMessageFromBinary(bytes)) }, - ShardHomeManifest -> shardHomeFromBinary, - HostShardManifest -> { bytes ⇒ HostShard(shardIdMessageFromBinary(bytes)) }, - ShardStartedManifest -> { bytes ⇒ ShardStarted(shardIdMessageFromBinary(bytes)) }, - BeginHandOffManifest -> { bytes ⇒ BeginHandOff(shardIdMessageFromBinary(bytes)) }, - BeginHandOffAckManifest -> { bytes ⇒ BeginHandOffAck(shardIdMessageFromBinary(bytes)) }, - HandOffManifest -> { bytes ⇒ HandOff(shardIdMessageFromBinary(bytes)) }, - ShardStoppedManifest -> { bytes ⇒ ShardStopped(shardIdMessageFromBinary(bytes)) }, - GracefulShutdownReqManifest -> { bytes ⇒ 
GracefulShutdownReq(actorRefMessageFromBinary(bytes)) }, - - GetShardStatsManifest -> { bytes ⇒ GetShardStats }, - ShardStatsManifest -> { bytes ⇒ shardStatsFromBinary(bytes) }) + EntityStateManifest → entityStateFromBinary, + EntityStartedManifest → entityStartedFromBinary, + EntityStoppedManifest → entityStoppedFromBinary, + + CoordinatorStateManifest → coordinatorStateFromBinary, + ShardRegionRegisteredManifest → { bytes ⇒ ShardRegionRegistered(actorRefMessageFromBinary(bytes)) }, + ShardRegionProxyRegisteredManifest → { bytes ⇒ ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) }, + ShardRegionTerminatedManifest → { bytes ⇒ ShardRegionTerminated(actorRefMessageFromBinary(bytes)) }, + ShardRegionProxyTerminatedManifest → { bytes ⇒ ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) }, + ShardHomeAllocatedManifest → shardHomeAllocatedFromBinary, + ShardHomeDeallocatedManifest → { bytes ⇒ ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) }, + + RegisterManifest → { bytes ⇒ Register(actorRefMessageFromBinary(bytes)) }, + RegisterProxyManifest → { bytes ⇒ RegisterProxy(actorRefMessageFromBinary(bytes)) }, + RegisterAckManifest → { bytes ⇒ RegisterAck(actorRefMessageFromBinary(bytes)) }, + GetShardHomeManifest → { bytes ⇒ GetShardHome(shardIdMessageFromBinary(bytes)) }, + ShardHomeManifest → shardHomeFromBinary, + HostShardManifest → { bytes ⇒ HostShard(shardIdMessageFromBinary(bytes)) }, + ShardStartedManifest → { bytes ⇒ ShardStarted(shardIdMessageFromBinary(bytes)) }, + BeginHandOffManifest → { bytes ⇒ BeginHandOff(shardIdMessageFromBinary(bytes)) }, + BeginHandOffAckManifest → { bytes ⇒ BeginHandOffAck(shardIdMessageFromBinary(bytes)) }, + HandOffManifest → { bytes ⇒ HandOff(shardIdMessageFromBinary(bytes)) }, + ShardStoppedManifest → { bytes ⇒ ShardStopped(shardIdMessageFromBinary(bytes)) }, + GracefulShutdownReqManifest → { bytes ⇒ GracefulShutdownReq(actorRefMessageFromBinary(bytes)) }, + + GetShardStatsManifest → { bytes ⇒ GetShardStats }, + ShardStatsManifest → { bytes ⇒ shardStatsFromBinary(bytes) }) override def manifest(obj: AnyRef): String = obj match { case _: EntityState ⇒ EntityStateManifest @@ -194,11 +194,11 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private def coordinatorStateFromProto(state: sm.CoordinatorState): State = { val shards: Map[String, ActorRef] = state.getShardsList.asScala.toVector.map { entry ⇒ - entry.getShardId -> resolveActorRef(entry.getRegionRef) + entry.getShardId → resolveActorRef(entry.getRegionRef) }(breakOut) val regionsZero: Map[ActorRef, Vector[String]] = - state.getRegionsList.asScala.toVector.map(resolveActorRef(_) -> Vector.empty[String])(breakOut) + state.getRegionsList.asScala.toVector.map(resolveActorRef(_) → Vector.empty[String])(breakOut) val regions: Map[ActorRef, Vector[String]] = shards.foldLeft(regionsZero) { case (acc, (shardId, regionRef)) ⇒ acc.updated(regionRef, acc(regionRef) :+ shardId) } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala index 67a9094..04a8b4a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala @@ -177,7 +177,7 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf val locations = (for (n ← 1 to 10) yield 
{ val id = n.toString region ! Ping(id) - id -> expectMsgType[ActorRef] + id → expectMsgType[ActorRef] }).toMap shardLocations ! Locations(locations) } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala index ecb2fbc..4e883ba 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala @@ -19,13 +19,13 @@ class LeastShardAllocationStrategySpec extends AkkaSpec { "LeastShardAllocationStrategy" must { "allocate to region with least number of shards" in { - val allocations = Map(regionA -> Vector("shard1"), regionB -> Vector("shard2"), regionC -> Vector.empty) + val allocations = Map(regionA → Vector("shard1"), regionB → Vector("shard2"), regionC → Vector.empty) Await.result(allocationStrategy.allocateShard(regionA, "shard3", allocations), 3.seconds) should ===(regionC) } "rebalance from region with most number of shards" in { - val allocations = Map(regionA -> Vector("shard1"), regionB -> Vector("shard2", "shard3"), - regionC -> Vector.empty) + val allocations = Map(regionA → Vector("shard1"), regionB → Vector("shard2", "shard3"), + regionC → Vector.empty) // so far regionB has 2 shards and regionC has 0 shards, but the diff is less than rebalanceThreshold Await.result(allocationStrategy.rebalance(allocations, Set.empty), 3.seconds) should ===(Set.empty[String]) @@ -39,8 +39,8 @@ class LeastShardAllocationStrategySpec extends AkkaSpec { } "must limit number of simultanious rebalance" in { - val allocations = Map(regionA -> Vector("shard1"), - regionB -> Vector("shard2", "shard3", "shard4", "shard5", "shard6"), regionC -> Vector.empty) + val allocations = Map(regionA → Vector("shard1"), + regionB → Vector("shard2", "shard3", "shard4", "shard5", "shard6"), regionC → Vector.empty) Await.result(allocationStrategy.rebalance(allocations, Set("shard2")), 3.seconds) should ===(Set("shard3")) Await.result(allocationStrategy.rebalance(allocations, Set("shard2", "shard3")), 3.seconds) should ===(Set.empty[String]) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala index 45ef2b3..fc4b725 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala @@ -30,8 +30,8 @@ class ClusterShardingMessageSerializerSpec extends AkkaSpec { "be able to serializable ShardCoordinator snapshot State" in { val state = State( - shards = Map("a" -> region1, "b" -> region2, "c" -> region2), - regions = Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]), + shards = Map("a" → region1, "b" → region2, "c" → region2), + regions = Map(region1 → Vector("a"), region2 → Vector("b", "c"), region3 → Vector.empty[String]), regionProxies = Set(regionProxy1, regionProxy2), unallocatedShards = Set("d")) checkSerialization(state) diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala index 380c68a..559cd67 100644 --- 
a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala @@ -105,13 +105,13 @@ object ClusterClientSettings { * external service registry */ final class ClusterClientSettings( - val initialContacts: Set[ActorPath], + val initialContacts: Set[ActorPath], val establishingGetContactsInterval: FiniteDuration, - val refreshContactsInterval: FiniteDuration, - val heartbeatInterval: FiniteDuration, - val acceptableHeartbeatPause: FiniteDuration, - val bufferSize: Int, - val reconnectTimeout: Option[FiniteDuration]) extends NoSerializationVerificationNeeded { + val refreshContactsInterval: FiniteDuration, + val heartbeatInterval: FiniteDuration, + val acceptableHeartbeatPause: FiniteDuration, + val bufferSize: Int, + val reconnectTimeout: Option[FiniteDuration]) extends NoSerializationVerificationNeeded { require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000") @@ -119,12 +119,12 @@ final class ClusterClientSettings( * For binary/source compatibility */ def this( - initialContacts: Set[ActorPath], + initialContacts: Set[ActorPath], establishingGetContactsInterval: FiniteDuration, - refreshContactsInterval: FiniteDuration, - heartbeatInterval: FiniteDuration, - acceptableHeartbeatPause: FiniteDuration, - bufferSize: Int) = + refreshContactsInterval: FiniteDuration, + heartbeatInterval: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration, + bufferSize: Int) = this(initialContacts, establishingGetContactsInterval, refreshContactsInterval, heartbeatInterval, acceptableHeartbeatPause, bufferSize, None) @@ -160,13 +160,13 @@ final class ClusterClientSettings( copy(reconnectTimeout = reconnectTimeout) private def copy( - initialContacts: Set[ActorPath] = initialContacts, - establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval, - refreshContactsInterval: FiniteDuration = refreshContactsInterval, - heartbeatInterval: FiniteDuration = heartbeatInterval, - acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, - bufferSize: Int = bufferSize, - reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings = + initialContacts: Set[ActorPath] = initialContacts, + establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval, + refreshContactsInterval: FiniteDuration = refreshContactsInterval, + heartbeatInterval: FiniteDuration = heartbeatInterval, + acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, + bufferSize: Int = bufferSize, + reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings = new ClusterClientSettings(initialContacts, establishingGetContactsInterval, refreshContactsInterval, heartbeatInterval, acceptableHeartbeatPause, bufferSize, reconnectTimeout) } @@ -504,8 +504,8 @@ object ClusterReceptionistSettings { * client will be stopped after this time of inactivity. 
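 * A sketch of adjusting it; the factory method and setter name are assumed,
 * the latter mirroring the `copy(responseTunnelReceiveTimeout = ...)` below:
 * {{{
 * import scala.concurrent.duration._
 * val settings = ClusterReceptionistSettings(system)
 *   .withResponseTunnelReceiveTimeout(60.seconds)
 * }}}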
*/ final class ClusterReceptionistSettings( - val role: Option[String], - val numberOfContacts: Int, + val role: Option[String], + val numberOfContacts: Int, val responseTunnelReceiveTimeout: FiniteDuration) extends NoSerializationVerificationNeeded { def withRole(role: String): ClusterReceptionistSettings = copy(role = ClusterReceptionistSettings.roleOption(role)) @@ -519,8 +519,8 @@ final class ClusterReceptionistSettings( copy(responseTunnelReceiveTimeout = responseTunnelReceiveTimeout) private def copy( - role: Option[String] = role, - numberOfContacts: Int = numberOfContacts, + role: Option[String] = role, + numberOfContacts: Int = numberOfContacts, responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout): ClusterReceptionistSettings = new ClusterReceptionistSettings(role, numberOfContacts, responseTunnelReceiveTimeout) @@ -538,7 +538,7 @@ object ClusterReceptionist { */ def props( pubSubMediator: ActorRef, - settings: ClusterReceptionistSettings): Props = + settings: ClusterReceptionistSettings): Props = Props(new ClusterReceptionist(pubSubMediator, settings)).withDeploy(Deploy.local) /** diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala index ce9e743..1a2f2bb 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala @@ -28,10 +28,10 @@ private[akka] class ClusterClientMessageSerializer(val system: ExtendedActorSyst private val emptyByteArray = Array.empty[Byte] private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - ContactsManifest -> contactsFromBinary, - GetContactsManifest -> { _ ⇒ GetContacts }, - HeartbeatManifest -> { _ ⇒ Heartbeat }, - HeartbeatRspManifest -> { _ ⇒ HeartbeatRsp }) + ContactsManifest → contactsFromBinary, + GetContactsManifest → { _ ⇒ GetContacts }, + HeartbeatManifest → { _ ⇒ Heartbeat }, + HeartbeatRspManifest → { _ ⇒ HeartbeatRsp }) override def manifest(obj: AnyRef): String = obj match { case _: Contacts ⇒ ContactsManifest diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala index f17d1ba..733599e 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala @@ -83,11 +83,11 @@ object DistributedPubSubSettings { * the registries. Next chunk will be transferred in next round of gossip. 
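 * A sketch of capping each gossip round, using the `withMaxDeltaElements`
 * setter defined below (the `(system)` factory method is assumed):
 * {{{
 * val settings = DistributedPubSubSettings(system).withMaxDeltaElements(500)
 * }}}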
*/ final class DistributedPubSubSettings( - val role: Option[String], - val routingLogic: RoutingLogic, - val gossipInterval: FiniteDuration, + val role: Option[String], + val routingLogic: RoutingLogic, + val gossipInterval: FiniteDuration, val removedTimeToLive: FiniteDuration, - val maxDeltaElements: Int) extends NoSerializationVerificationNeeded { + val maxDeltaElements: Int) extends NoSerializationVerificationNeeded { require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic], "'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator") @@ -108,11 +108,11 @@ final class DistributedPubSubSettings( def withMaxDeltaElements(maxDeltaElements: Int): DistributedPubSubSettings = copy(maxDeltaElements = maxDeltaElements) - private def copy(role: Option[String] = role, - routingLogic: RoutingLogic = routingLogic, - gossipInterval: FiniteDuration = gossipInterval, + private def copy(role: Option[String] = role, + routingLogic: RoutingLogic = routingLogic, + gossipInterval: FiniteDuration = gossipInterval, removedTimeToLive: FiniteDuration = removedTimeToLive, - maxDeltaElements: Int = maxDeltaElements): DistributedPubSubSettings = + maxDeltaElements: Int = maxDeltaElements): DistributedPubSubSettings = new DistributedPubSubSettings(role, routingLogic, gossipInterval, removedTimeToLive, maxDeltaElements) } @@ -209,7 +209,7 @@ object DistributedPubSubMediator { @SerialVersionUID(1L) final case class Bucket( - owner: Address, + owner: Address, version: Long, content: TreeMap[String, ValueHolder]) @@ -627,7 +627,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act if (nodes(b.owner)) { val myBucket = registry(b.owner) if (b.version > myBucket.version) { - registry += (b.owner -> myBucket.copy(version = b.version, content = myBucket.content ++ b.content)) + registry += (b.owner → myBucket.copy(version = b.version, content = myBucket.content ++ b.content)) } } } @@ -710,8 +710,8 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act def put(key: String, valueOption: Option[ActorRef]): Unit = { val bucket = registry(selfAddress) val v = nextVersion() - registry += (selfAddress -> bucket.copy(version = v, - content = bucket.content + (key -> ValueHolder(v, valueOption)))) + registry += (selfAddress → bucket.copy(version = v, + content = bucket.content + (key → ValueHolder(v, valueOption)))) } def getCurrentTopics(): Set[String] = { @@ -734,11 +734,11 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act def mkKey(path: ActorPath): String = Internal.mkKey(path) - def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) ⇒ (owner -> bucket.version) } + def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) ⇒ (owner → bucket.version) } def collectDelta(otherVersions: Map[Address, Long]): immutable.Iterable[Bucket] = { // missing entries are represented by version 0 - val filledOtherVersions = myVersions.map { case (k, _) ⇒ k -> 0L } ++ otherVersions + val filledOtherVersions = myVersions.map { case (k, _) ⇒ k → 0L } ++ otherVersions var count = 0 filledOtherVersions.collect { case (owner, v) if registry(owner).version > v && count < maxDeltaElements ⇒ @@ -782,7 +782,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act case (key, ValueHolder(version, None)) if (bucket.version - version > removedTimeToLiveMillis) ⇒ key } if (oldRemoved.nonEmpty) - registry += owner -> bucket.copy(content = bucket.content -- oldRemoved) + registry += 
owner → bucket.copy(content = bucket.content -- oldRemoved) } } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala index 43efd6c..41237ea 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala @@ -44,5 +44,5 @@ private[pubsub] trait PerGroupingBuffer { } } - def initializeGrouping(grouping: String): Unit = buffers += grouping -> Vector.empty + def initializeGrouping(grouping: String): Unit = buffers += grouping → Vector.empty } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala index 9bfd99a..6060746 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala @@ -39,11 +39,11 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private val PublishManifest = "E" private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - StatusManifest -> statusFromBinary, - DeltaManifest -> deltaFromBinary, - SendManifest -> sendFromBinary, - SendToAllManifest -> sendToAllFromBinary, - PublishManifest -> publishFromBinary) + StatusManifest → statusFromBinary, + DeltaManifest → deltaFromBinary, + SendManifest → sendFromBinary, + SendToAllManifest → sendToAllFromBinary, + PublishManifest → publishFromBinary) override def manifest(obj: AnyRef): String = obj match { case _: Status ⇒ StatusManifest @@ -122,7 +122,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def statusFromProto(status: dm.Status): Status = Status(status.getVersionsList.asScala.map(v ⇒ - addressFromProto(v.getAddress) -> v.getTimestamp)(breakOut)) + addressFromProto(v.getAddress) → v.getTimestamp)(breakOut)) private def deltaToProto(delta: Delta): dm.Delta = { val buckets = delta.buckets.map { b ⇒ @@ -148,7 +148,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def deltaFromProto(delta: dm.Delta): Delta = Delta(delta.getBucketsList.asScala.toVector.map { b ⇒ val content: TreeMap[String, ValueHolder] = b.getContentList.asScala.map { entry ⇒ - entry.getKey -> ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None) + entry.getKey → ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None) }(breakOut) Bucket(addressFromProto(b.getOwner), b.getVersion, content) }) diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index 1371f88..dbc1ae4 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -86,9 +86,9 @@ object ClusterSingletonManagerSettings { * (+ `removalMargin`). 
*/ final class ClusterSingletonManagerSettings( - val singletonName: String, - val role: Option[String], - val removalMargin: FiniteDuration, + val singletonName: String, + val role: Option[String], + val removalMargin: FiniteDuration, val handOverRetryInterval: FiniteDuration) extends NoSerializationVerificationNeeded { def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name) @@ -103,9 +103,9 @@ final class ClusterSingletonManagerSettings( def withHandOverRetryInterval(retryInterval: FiniteDuration): ClusterSingletonManagerSettings = copy(handOverRetryInterval = retryInterval) - private def copy(singletonName: String = singletonName, - role: Option[String] = role, - removalMargin: FiniteDuration = removalMargin, + private def copy(singletonName: String = singletonName, + role: Option[String] = role, + removalMargin: FiniteDuration = removalMargin, handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings = new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval) } @@ -121,9 +121,9 @@ object ClusterSingletonManager { * Scala API: Factory method for `ClusterSingletonManager` [[akka.actor.Props]]. */ def props( - singletonProps: Props, + singletonProps: Props, terminationMessage: Any, - settings: ClusterSingletonManagerSettings): Props = + settings: ClusterSingletonManagerSettings): Props = Props(new ClusterSingletonManager(singletonProps, terminationMessage, settings)).withDeploy(Deploy.local) /** @@ -364,9 +364,9 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess * @param settings see [[ClusterSingletonManagerSettings]] */ class ClusterSingletonManager( - singletonProps: Props, + singletonProps: Props, terminationMessage: Any, - settings: ClusterSingletonManagerSettings) + settings: ClusterSingletonManagerSettings) extends Actor with FSM[ClusterSingletonManager.State, ClusterSingletonManager.Data] { import ClusterSingletonManager.Internal._ @@ -406,7 +406,7 @@ class ClusterSingletonManager( var removed = Map.empty[Address, Deadline] def addRemoved(address: Address): Unit = - removed += address -> (Deadline.now + 15.minutes) + removed += address → (Deadline.now + 15.minutes) def cleanupOverdueNotMemberAnyMore(): Unit = { removed = removed filter { case (address, deadline) ⇒ deadline.hasTimeLeft } @@ -698,24 +698,24 @@ class ClusterSingletonManager( } onTransition { - case from -> to ⇒ logInfo("ClusterSingletonManager state change [{} -> {}]", from, to) + case from → to ⇒ logInfo("ClusterSingletonManager state change [{} -> {}]", from, to) } onTransition { - case _ -> BecomingOldest ⇒ setTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval, repeat = false) + case _ → BecomingOldest ⇒ setTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval, repeat = false) } onTransition { - case BecomingOldest -> _ ⇒ cancelTimer(HandOverRetryTimer) - case WasOldest -> _ ⇒ cancelTimer(TakeOverRetryTimer) + case BecomingOldest → _ ⇒ cancelTimer(HandOverRetryTimer) + case WasOldest → _ ⇒ cancelTimer(TakeOverRetryTimer) } onTransition { - case _ -> (Younger | Oldest) ⇒ getNextOldestChanged() + case _ → (Younger | Oldest) ⇒ getNextOldestChanged() } onTransition { - case _ -> (Younger | End) if removed.contains(cluster.selfAddress) ⇒ + case _ → (Younger | End) if removed.contains(cluster.selfAddress) ⇒ logInfo("Self removed, stopping ClusterSingletonManager") // note that FSM.stop() can't be used in onTransition context.stop(self) diff --git 
a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 8cd8599..b6498d0 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -69,10 +69,10 @@ object ClusterSingletonProxySettings { * immediately if the location of the singleton is unknown. */ final class ClusterSingletonProxySettings( - val singletonName: String, - val role: Option[String], + val singletonName: String, + val role: Option[String], val singletonIdentificationInterval: FiniteDuration, - val bufferSize: Int) extends NoSerializationVerificationNeeded { + val bufferSize: Int) extends NoSerializationVerificationNeeded { require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000") @@ -88,10 +88,10 @@ final class ClusterSingletonProxySettings( def withBufferSize(bufferSize: Int): ClusterSingletonProxySettings = copy(bufferSize = bufferSize) - private def copy(singletonName: String = singletonName, - role: Option[String] = role, + private def copy(singletonName: String = singletonName, + role: Option[String] = role, singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, - bufferSize: Int = bufferSize): ClusterSingletonProxySettings = + bufferSize: Int = bufferSize): ClusterSingletonProxySettings = new ClusterSingletonProxySettings(singletonName, role, singletonIdentificationInterval, bufferSize) } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala index 7a9ae66..00e1233 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala @@ -30,10 +30,10 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS private val emptyByteArray = Array.empty[Byte] private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - HandOverToMeManifest -> { _ ⇒ HandOverToMe }, - HandOverInProgressManifest -> { _ ⇒ HandOverInProgress }, - HandOverDoneManifest -> { _ ⇒ HandOverDone }, - TakeOverFromMeManifest -> { _ ⇒ TakeOverFromMe }) + HandOverToMeManifest → { _ ⇒ HandOverToMe }, + HandOverInProgressManifest → { _ ⇒ HandOverInProgress }, + HandOverDoneManifest → { _ ⇒ HandOverDone }, + TakeOverFromMeManifest → { _ ⇒ TakeOverFromMe }) override def manifest(obj: AnyRef): String = obj match { case HandOverToMe ⇒ HandOverToMeManifest diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala index 3feca95..09a7618 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala @@ -146,7 +146,7 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia def createChatUser(name: String): ActorRef = { var a = system.actorOf(Props(classOf[TestChatUser], mediator, testActor), name) - chatUsers += (name -> a) + chatUsers += (name → a) a } @@ -473,11 
+473,11 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia val deltaBuckets1 = expectMsgType[Delta].buckets deltaBuckets1.map(_.content.size).sum should ===(500) - mediator ! Status(versions = deltaBuckets1.map(b ⇒ b.owner -> b.version).toMap) + mediator ! Status(versions = deltaBuckets1.map(b ⇒ b.owner → b.version).toMap) val deltaBuckets2 = expectMsgType[Delta].buckets deltaBuckets2.map(_.content.size).sum should ===(500) - mediator ! Status(versions = deltaBuckets2.map(b ⇒ b.owner -> b.version).toMap) + mediator ! Status(versions = deltaBuckets2.map(b ⇒ b.owner → b.version).toMap) val deltaBuckets3 = expectMsgType[Delta].buckets deltaBuckets3.map(_.content.size).sum should ===(10 + 9 + 2 + many - 500 - 500) diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala index 0d7fb4e..bcd7b0b 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala @@ -31,11 +31,11 @@ class DistributedPubSubMessageSerializerSpec extends AkkaSpec { val u2 = system.actorOf(Props.empty, "u2") val u3 = system.actorOf(Props.empty, "u3") val u4 = system.actorOf(Props.empty, "u4") - checkSerialization(Status(Map(address1 -> 3, address2 -> 17, address3 -> 5))) + checkSerialization(Status(Map(address1 → 3, address2 → 17, address3 → 5))) checkSerialization(Delta(List( - Bucket(address1, 3, TreeMap("/user/u1" -> ValueHolder(2, Some(u1)), "/user/u2" -> ValueHolder(3, Some(u2)))), - Bucket(address2, 17, TreeMap("/user/u3" -> ValueHolder(17, Some(u3)))), - Bucket(address3, 5, TreeMap("/user/u4" -> ValueHolder(4, Some(u4)), "/user/u5" -> ValueHolder(5, None)))))) + Bucket(address1, 3, TreeMap("/user/u1" → ValueHolder(2, Some(u1)), "/user/u2" → ValueHolder(3, Some(u2)))), + Bucket(address2, 17, TreeMap("/user/u3" → ValueHolder(17, Some(u3)))), + Bucket(address3, 5, TreeMap("/user/u4" → ValueHolder(4, Some(u4)), "/user/u5" → ValueHolder(5, None)))))) checkSerialization(Send("/user/u3", "hello", localAffinity = true)) checkSerialization(SendToAll("/user/u3", "hello", allButSelf = true)) checkSerialization(Publish("mytopic", "hello")) diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala index e183f3e..9ca8a46 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala @@ -125,7 +125,7 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur downOrAddPending(node) } else { val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(node)) - scheduledUnreachable += (node -> task) + scheduledUnreachable += (node → task) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 8e34933..b304421 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -123,9 +123,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { case tf ⇒ tf } system.dynamicAccess.createInstanceFor[Scheduler](system.settings.SchedulerClass, immutable.Seq( - classOf[Config] -> cfg, - classOf[LoggingAdapter] -> log, - 
classOf[ThreadFactory] -> threadFactory)).get + classOf[Config] → cfg, + classOf[LoggingAdapter] → log, + classOf[ThreadFactory] → threadFactory)).get } else { // delegate to system.scheduler, but don't close over system val systemScheduler = system.scheduler @@ -138,7 +138,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = systemScheduler.schedule(initialDelay, interval, runnable) - override def scheduleOnce(delay: FiniteDuration, + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = systemScheduler.scheduleOnce(delay, runnable) } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index b621cfc..382edd6 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -21,9 +21,9 @@ import com.typesafe.config.ConfigFactory * the `ClusterActorRefProvider` is used. */ private[akka] class ClusterActorRefProvider( - _systemName: String, - _settings: ActorSystem.Settings, - _eventStream: EventStream, + _systemName: String, + _settings: ActorSystem.Settings, + _eventStream: EventStream, _dynamicAccess: DynamicAccess) extends RemoteActorRefProvider( _systemName, _settings, _eventStream, _dynamicAccess) { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 52327b2..897553e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -1290,10 +1290,10 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status: @SerialVersionUID(1L) private[cluster] final case class GossipStats( receivedGossipCount: Long = 0L, - mergeCount: Long = 0L, - sameCount: Long = 0L, - newerCount: Long = 0L, - olderCount: Long = 0L) { + mergeCount: Long = 0L, + sameCount: Long = 0L, + newerCount: Long = 0L, + olderCount: Long = 0L) { def incrementMergeCount(): GossipStats = copy(mergeCount = mergeCount + 1, receivedGossipCount = receivedGossipCount + 1) @@ -1333,5 +1333,5 @@ private[cluster] final case class GossipStats( @SerialVersionUID(1L) private[cluster] final case class VectorClockStats( versionSize: Int = 0, - seenLatest: Int = 0) + seenLatest: Int = 0) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index ac93008..72852ad 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -56,10 +56,10 @@ object ClusterEvent { * Current snapshot state of the cluster. Sent to new subscriber. 
*/ final case class CurrentClusterState( - members: immutable.SortedSet[Member] = immutable.SortedSet.empty, - unreachable: Set[Member] = Set.empty, - seenBy: Set[Address] = Set.empty, - leader: Option[Address] = None, + members: immutable.SortedSet[Member] = immutable.SortedSet.empty, + unreachable: Set[Member] = Set.empty, + seenBy: Set[Address] = Set.empty, + leader: Option[Address] = None, roleLeaderMap: Map[String, Option[Address]] = Map.empty) { /** @@ -395,7 +395,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto unreachable = unreachable, seenBy = latestGossip.seenBy.map(_.address), leader = latestGossip.leader(selfUniqueAddress).map(_.address), - roleLeaderMap = latestGossip.allRoles.map(r ⇒ r -> latestGossip.roleLeader(r, selfUniqueAddress) + roleLeaderMap = latestGossip.allRoles.map(r ⇒ r → latestGossip.roleLeader(r, selfUniqueAddress) .map(_.address))(collection.breakOut)) receiver ! state } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index f192c93..b2710c7 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -173,9 +173,9 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg * It is immutable, but it updates the failureDetector. */ private[cluster] final case class ClusterHeartbeatSenderState( - ring: HeartbeatNodeRing, + ring: HeartbeatNodeRing, oldReceiversNowUnreachable: Set[UniqueAddress], - failureDetector: FailureDetectorRegistry[Address]) { + failureDetector: FailureDetectorRegistry[Address]) { val activeReceivers: Set[UniqueAddress] = ring.myReceivers union oldReceiversNowUnreachable @@ -241,9 +241,9 @@ private[cluster] final case class ClusterHeartbeatSenderState( * It is immutable, i.e. the methods return new instances. */ private[cluster] final case class HeartbeatNodeRing( - selfAddress: UniqueAddress, - nodes: Set[UniqueAddress], - unreachable: Set[UniqueAddress], + selfAddress: UniqueAddress, + nodes: Set[UniqueAddress], + unreachable: Set[UniqueAddress], monitoredByNrOfMembers: Int) { require(nodes contains selfAddress, s"nodes [${nodes.mkString(", ")}] must contain selfAddress [${selfAddress}]") diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala index 941fdb8..f71dc2e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala @@ -535,11 +535,11 @@ object StandardMetrics { */ @SerialVersionUID(1L) final case class Cpu( - address: Address, - timestamp: Long, + address: Address, + timestamp: Long, systemLoadAverage: Option[Double], - cpuCombined: Option[Double], - processors: Int) { + cpuCombined: Option[Double], + processors: Int) { cpuCombined match { case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]") @@ -804,7 +804,7 @@ private[cluster] object MetricsCollector { } } else { - system.dynamicAccess.createInstanceFor[MetricsCollector](fqcn, List(classOf[ActorSystem] -> system)). + system.dynamicAccess.createInstanceFor[MetricsCollector](fqcn, List(classOf[ActorSystem] → system)). 
recover { case e ⇒ throw new ConfigurationException("Could not create custom metrics collector [" + fqcn + "] due to:" + e.toString) }.get diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 0dc643a..c787c7c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -4,7 +4,7 @@ package akka.cluster -// TODO remove metrics +// TODO remove metrics import java.io.Closeable import scala.collection.immutable @@ -74,7 +74,7 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { case LeaderChanged(leader) ⇒ _state = _state.copy(leader = leader) case RoleLeaderChanged(role, leader) ⇒ - _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role -> leader)) + _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role → leader)) case stats: CurrentInternalStats ⇒ _latestStats = stats case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes case ClusterShuttingDown ⇒ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala index 00d64a5..8fb7299 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala @@ -21,9 +21,9 @@ private[cluster] object ClusterRemoteWatcher { * Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]]. */ def props( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, + failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, heartbeatExpectedResponseAfter: FiniteDuration): Props = Props(classOf[ClusterRemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval, heartbeatExpectedResponseAfter).withDeploy(Deploy.local) @@ -41,9 +41,9 @@ private[cluster] object ClusterRemoteWatcher { * of the cluster and then later becomes cluster member. 
*/ private[cluster] class ClusterRemoteWatcher( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, + failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, heartbeatExpectedResponseAfter: FiniteDuration) extends RemoteWatcher( failureDetector, diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index f55d02a..f42662e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -83,7 +83,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { val MinNrOfMembersOfRole: Map[String, Int] = { import scala.collection.JavaConverters._ cc.getConfig("role").root.asScala.collect { - case (key, value: ConfigObject) ⇒ (key -> value.toConfig.getInt("min-nr-of-members")) + case (key, value: ConfigObject) ⇒ (key → value.toConfig.getInt("min-nr-of-members")) }.toMap } val JmxEnabled: Boolean = cc.getBoolean("jmx.enabled") diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index 30df8e6..7699c3d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -60,9 +60,9 @@ private[cluster] object Gossip { */ @SerialVersionUID(1L) private[cluster] final case class Gossip( - members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address - overview: GossipOverview = GossipOverview(), - version: VectorClock = VectorClock()) { // vector clock version + members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address + overview: GossipOverview = GossipOverview(), + version: VectorClock = VectorClock()) { // vector clock version if (Cluster.isAssertInvariantsEnabled) assertInvariants() @@ -84,7 +84,7 @@ private[cluster] final case class Gossip( } @transient private lazy val membersMap: Map[UniqueAddress, Member] = - members.map(m ⇒ m.uniqueAddress -> m)(collection.breakOut) + members.map(m ⇒ m.uniqueAddress → m)(collection.breakOut) /** * Increments the version for this 'Node'. 
@@ -227,8 +227,8 @@ private[cluster] final case class Gossip( */ @SerialVersionUID(1L) private[cluster] final case class GossipOverview( - seen: Set[UniqueAddress] = Set.empty, - reachability: Reachability = Reachability.empty) { + seen: Set[UniqueAddress] = Set.empty, + reachability: Reachability = Reachability.empty) { override def toString = s"GossipOverview(reachability = [$reachability], seen = [${seen.mkString(", ")}])" @@ -252,11 +252,11 @@ object GossipEnvelope { */ @SerialVersionUID(2L) private[cluster] class GossipEnvelope private ( - val from: UniqueAddress, - val to: UniqueAddress, - @volatile var g: Gossip, - serDeadline: Deadline, - @transient @volatile var ser: () ⇒ Gossip) extends ClusterMessage { + val from: UniqueAddress, + val to: UniqueAddress, + @volatile var g: Gossip, + serDeadline: Deadline, + @transient @volatile var ser: () ⇒ Gossip) extends ClusterMessage { def gossip: Gossip = { deserialize() diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 3f55ef5..79e5b9a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -15,10 +15,10 @@ import MemberStatus._ */ @SerialVersionUID(1L) class Member private[cluster] ( - val uniqueAddress: UniqueAddress, + val uniqueAddress: UniqueAddress, private[cluster] val upNumber: Int, // INTERNAL API - val status: MemberStatus, - val roles: Set[String]) extends Serializable { + val status: MemberStatus, + val roles: Set[String]) extends Serializable { def address: Address = uniqueAddress.address @@ -233,13 +233,13 @@ object MemberStatus { */ private[cluster] val allowedTransitions: Map[MemberStatus, Set[MemberStatus]] = Map( - Joining -> Set(WeaklyUp, Up, Down, Removed), - WeaklyUp -> Set(Up, Down, Removed), - Up -> Set(Leaving, Down, Removed), - Leaving -> Set(Exiting, Down, Removed), - Down -> Set(Removed), - Exiting -> Set(Removed, Down), - Removed -> Set.empty[MemberStatus]) + Joining → Set(WeaklyUp, Up, Down, Removed), + WeaklyUp → Set(Up, Down, Removed), + Up → Set(Leaving, Down, Removed), + Leaving → Set(Exiting, Down, Removed), + Down → Set(Removed), + Exiting → Set(Removed, Down), + Removed → Set.empty[MemberStatus]) } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 57ced59..c978cf8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -47,7 +47,7 @@ private[cluster] object Reachability { */ @SerialVersionUID(1L) private[cluster] class Reachability private ( - val records: immutable.IndexedSeq[Reachability.Record], + val records: immutable.IndexedSeq[Reachability.Record], val versions: Map[UniqueAddress, Long]) extends Serializable { import Reachability._ @@ -67,10 +67,10 @@ private[cluster] class Reachability private ( records foreach { r ⇒ val m = mapBuilder.get(r.observer) match { - case None ⇒ Map(r.subject -> r) + case None ⇒ Map(r.subject → r) case Some(m) ⇒ m.updated(r.subject, r) } - mapBuilder += (r.observer -> m) + mapBuilder += (r.observer → m) if (r.status == Unreachable) unreachableBuilder += r.subject else if (r.status == Terminated) terminatedBuilder += r.subject @@ -167,7 +167,7 @@ private[cluster] class Reachability private ( } if (observerVersion2 > observerVersion1) - newVersions += (observer -> observerVersion2) + newVersions += (observer → observerVersion2) } newVersions = 
newVersions.filterNot { case (k, _) ⇒ !allowed(k) } @@ -242,7 +242,7 @@ private[cluster] class Reachability private ( case (subject, records) if records.exists(_.status == Unreachable) ⇒ val observers: Set[UniqueAddress] = records.collect { case r if r.status == Unreachable ⇒ r.observer }(breakOut) - (subject -> observers) + (subject → observers) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 8318e12..0da453b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -37,27 +37,27 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri private lazy val GossipTimeToLive = Cluster(system).settings.GossipTimeToLive private val fromBinaryMap = collection.immutable.HashMap[Class[_ <: ClusterMessage], Array[Byte] ⇒ AnyRef]( - classOf[InternalClusterAction.Join] -> { + classOf[InternalClusterAction.Join] → { case bytes ⇒ val m = cm.Join.parseFrom(bytes) InternalClusterAction.Join(uniqueAddressFromProto(m.getNode), Set.empty[String] ++ m.getRolesList.asScala) }, - classOf[InternalClusterAction.Welcome] -> { + classOf[InternalClusterAction.Welcome] → { case bytes ⇒ val m = cm.Welcome.parseFrom(decompress(bytes)) InternalClusterAction.Welcome(uniqueAddressFromProto(m.getFrom), gossipFromProto(m.getGossip)) }, - classOf[ClusterUserAction.Leave] -> (bytes ⇒ ClusterUserAction.Leave(addressFromBinary(bytes))), - classOf[ClusterUserAction.Down] -> (bytes ⇒ ClusterUserAction.Down(addressFromBinary(bytes))), - InternalClusterAction.InitJoin.getClass -> (_ ⇒ InternalClusterAction.InitJoin), - classOf[InternalClusterAction.InitJoinAck] -> (bytes ⇒ InternalClusterAction.InitJoinAck(addressFromBinary(bytes))), - classOf[InternalClusterAction.InitJoinNack] -> (bytes ⇒ InternalClusterAction.InitJoinNack(addressFromBinary(bytes))), - classOf[ClusterHeartbeatSender.Heartbeat] -> (bytes ⇒ ClusterHeartbeatSender.Heartbeat(addressFromBinary(bytes))), - classOf[ClusterHeartbeatSender.HeartbeatRsp] -> (bytes ⇒ ClusterHeartbeatSender.HeartbeatRsp(uniqueAddressFromBinary(bytes))), - classOf[GossipStatus] -> gossipStatusFromBinary, - classOf[GossipEnvelope] -> gossipEnvelopeFromBinary, - classOf[MetricsGossipEnvelope] -> metricsGossipEnvelopeFromBinary) + classOf[ClusterUserAction.Leave] → (bytes ⇒ ClusterUserAction.Leave(addressFromBinary(bytes))), + classOf[ClusterUserAction.Down] → (bytes ⇒ ClusterUserAction.Down(addressFromBinary(bytes))), + InternalClusterAction.InitJoin.getClass → (_ ⇒ InternalClusterAction.InitJoin), + classOf[InternalClusterAction.InitJoinAck] → (bytes ⇒ InternalClusterAction.InitJoinAck(addressFromBinary(bytes))), + classOf[InternalClusterAction.InitJoinNack] → (bytes ⇒ InternalClusterAction.InitJoinNack(addressFromBinary(bytes))), + classOf[ClusterHeartbeatSender.Heartbeat] → (bytes ⇒ ClusterHeartbeatSender.Heartbeat(addressFromBinary(bytes))), + classOf[ClusterHeartbeatSender.HeartbeatRsp] → (bytes ⇒ ClusterHeartbeatSender.HeartbeatRsp(uniqueAddressFromBinary(bytes))), + classOf[GossipStatus] → gossipStatusFromBinary, + classOf[GossipEnvelope] → gossipEnvelopeFromBinary, + classOf[MetricsGossipEnvelope] → metricsGossipEnvelopeFromBinary) def includeManifest: Boolean = true @@ -164,20 +164,20 @@ class ClusterMessageSerializer(val system: ExtendedActorSystem) extends BaseSeri 
UniqueAddress(addressFromProto(uniqueAddress.getAddress), uniqueAddress.getUid) private val memberStatusToInt = scala.collection.immutable.HashMap[MemberStatus, Int]( - MemberStatus.Joining -> cm.MemberStatus.Joining_VALUE, - MemberStatus.Up -> cm.MemberStatus.Up_VALUE, - MemberStatus.Leaving -> cm.MemberStatus.Leaving_VALUE, - MemberStatus.Exiting -> cm.MemberStatus.Exiting_VALUE, - MemberStatus.Down -> cm.MemberStatus.Down_VALUE, - MemberStatus.Removed -> cm.MemberStatus.Removed_VALUE, - MemberStatus.WeaklyUp -> cm.MemberStatus.WeaklyUp_VALUE) + MemberStatus.Joining → cm.MemberStatus.Joining_VALUE, + MemberStatus.Up → cm.MemberStatus.Up_VALUE, + MemberStatus.Leaving → cm.MemberStatus.Leaving_VALUE, + MemberStatus.Exiting → cm.MemberStatus.Exiting_VALUE, + MemberStatus.Down → cm.MemberStatus.Down_VALUE, + MemberStatus.Removed → cm.MemberStatus.Removed_VALUE, + MemberStatus.WeaklyUp → cm.MemberStatus.WeaklyUp_VALUE) private val memberStatusFromInt = memberStatusToInt.map { case (a, b) ⇒ (b, a) } private val reachabilityStatusToInt = scala.collection.immutable.HashMap[Reachability.ReachabilityStatus, Int]( - Reachability.Reachable -> cm.ReachabilityStatus.Reachable_VALUE, - Reachability.Unreachable -> cm.ReachabilityStatus.Unreachable_VALUE, - Reachability.Terminated -> cm.ReachabilityStatus.Terminated_VALUE) + Reachability.Reachable → cm.ReachabilityStatus.Reachable_VALUE, + Reachability.Unreachable → cm.ReachabilityStatus.Unreachable_VALUE, + Reachability.Terminated → cm.ReachabilityStatus.Terminated_VALUE) private val reachabilityStatusFromInt = reachabilityStatusToInt.map { case (a, b) ⇒ (b, a) } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala index bc32693..e459429 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancing.scala @@ -131,11 +131,11 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS @SerialVersionUID(1L) @deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4") final case class AdaptiveLoadBalancingPool( - metricsSelector: MetricsSelector = MixMetricsSelector, - override val nrOfInstances: Int = 0, + metricsSelector: MetricsSelector = MixMetricsSelector, + override val nrOfInstances: Int = 0, override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) extends Pool { def this(config: Config, dynamicAccess: DynamicAccess) = @@ -212,9 +212,9 @@ final case class AdaptiveLoadBalancingPool( @SerialVersionUID(1L) @deprecated("Superseded by akka.cluster.metrics (in akka-cluster-metrics jar)", "2.4") final case class AdaptiveLoadBalancingGroup( - metricsSelector: MetricsSelector = MixMetricsSelector, - override val paths: immutable.Iterable[String] = Nil, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + metricsSelector: MetricsSelector = MixMetricsSelector, + override val paths: immutable.Iterable[String] = Nil, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group { def this(config: Config, dynamicAccess: DynamicAccess) = @@ -229,7 +229,7 @@ final case class 
AdaptiveLoadBalancingGroup( * sent with [[akka.actor.ActorSelection]] to these paths */ def this(metricsSelector: MetricsSelector, - routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths)) + routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths)) override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths @@ -363,9 +363,9 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) { case (acc, (address, capacity)) ⇒ val (sum, count) = acc(address) - acc + (address -> ((sum + capacity, count + 1))) + acc + (address → ((sum + capacity, count + 1))) }.map { - case (addr, (sum, count)) ⇒ (addr -> sum / count) + case (addr, (sum, count)) ⇒ (addr → sum / count) } } @@ -380,7 +380,7 @@ object MetricsSelector { case "cpu" ⇒ CpuMetricsSelector case "load" ⇒ SystemLoadAverageMetricsSelector case fqn ⇒ - val args = List(classOf[Config] -> config) + val args = List(classOf[Config] → config) dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({ case exception ⇒ throw new IllegalArgumentException( (s"Cannot instantiate metrics-selector [$fqn], " + @@ -430,7 +430,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) ⇒ c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) } + capacity map { case (addr, c) ⇒ (addr → math.round((c) / divisor).toInt) } } } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index 99e42cc..4a1f556 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -43,10 +43,10 @@ object ClusterRouterGroupSettings { */ @SerialVersionUID(1L) final case class ClusterRouterGroupSettings( - totalInstances: Int, - routeesPaths: immutable.Seq[String], + totalInstances: Int, + routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, - useRole: Option[String]) extends ClusterRouterSettingsBase { + useRole: Option[String]) extends ClusterRouterSettingsBase { /** * Java API @@ -82,10 +82,10 @@ object ClusterRouterPoolSettings { */ @SerialVersionUID(1L) final case class ClusterRouterPoolSettings( - totalInstances: Int, + totalInstances: Int, maxInstancesPerNode: Int, - allowLocalRoutees: Boolean, - useRole: Option[String]) extends ClusterRouterSettingsBase { + allowLocalRoutees: Boolean, + useRole: Option[String]) extends ClusterRouterSettingsBase { /** * Java API @@ -276,9 +276,9 @@ private[akka] class ClusterRouterPoolActor( } else { // find the node with least routees val numberOfRouteesPerNode: Map[Address, Int] = - currentRoutees.foldLeft(currentNodes.map(_ -> 0).toMap.withDefaultValue(0)) { (acc, x) ⇒ + currentRoutees.foldLeft(currentNodes.map(_ → 0).toMap.withDefaultValue(0)) { (acc, x) ⇒ val address = fullAddress(x) - acc + (address -> (acc(address) + 1)) + acc + (address → (acc(address) + 1)) } val (address, count) = numberOfRouteesPerNode.minBy(_._2) @@ -304,7 +304,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett var usedRouteePaths: Map[Address, Set[String]] = if (settings.allowLocalRoutees) - 
Map(cluster.selfAddress -> settings.routeesPaths.toSet) + Map(cluster.selfAddress → settings.routeesPaths.toSet) else Map.empty diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala index 7b784b2..132d013 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala @@ -4,7 +4,7 @@ package akka.cluster -// TODO remove metrics +// TODO remove metrics import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig } import com.typesafe.config.ConfigFactory diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala index b393cd6..2f8da61 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala @@ -4,7 +4,7 @@ package akka.cluster -// TODO remove metrics +// TODO remove metrics import scala.language.postfixOps import scala.concurrent.duration._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala index 4a098f4..eb81ea9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala @@ -39,7 +39,7 @@ abstract class DeterministicOldestWhenJoiningSpec // reverse order because that expose the bug in issue #18554 def seedNodes: immutable.IndexedSeq[Address] = Vector(address(seed1), address(seed2), address(seed3)).sorted(Member.addressOrdering).reverse - val roleByAddress = Map(address(seed1) -> seed1, address(seed2) -> seed2, address(seed3) -> seed3) + val roleByAddress = Map(address(seed1) → seed1, address(seed2) → seed2, address(seed3) → seed3) "Joining a cluster" must { "result in deterministic oldest node" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index fd12587..c3368dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -282,9 +282,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring. 
*/ def awaitMembersUp( - numberOfMembers: Int, - canNotBePartOfMemberRing: Set[Address] = Set.empty, - timeout: FiniteDuration = 25.seconds): Unit = { + numberOfMembers: Int, + canNotBePartOfMemberRing: Set[Address] = Set.empty, + timeout: FiniteDuration = 25.seconds): Unit = { within(timeout) { if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set awaitAssert(canNotBePartOfMemberRing foreach (a ⇒ clusterView.members.map(_.address) should not contain (a))) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala index 044a59e..2f16840 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -228,8 +228,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } final case class ClusterResult( - address: Address, - duration: Duration, + address: Address, + duration: Duration, clusterStats: GossipStats) final case class AggregatedClusterResult(title: String, duration: Duration, clusterStats: GossipStats) @@ -270,8 +270,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { def receive = { case ClusterMetricsChanged(clusterMetrics) ⇒ nodeMetrics = clusterMetrics - case PhiResult(from, phiValues) ⇒ phiValuesObservedByNode += from -> phiValues - case StatsResult(from, stats) ⇒ clusterStatsObservedByNode += from -> stats + case PhiResult(from, phiValues) ⇒ phiValuesObservedByNode += from → phiValues + case StatsResult(from, stats) ⇒ clusterStatsObservedByNode += from → stats case ReportTick ⇒ if (infolog) log.info(s"[${title}] in progress\n${formatMetrics}\n\n${formatPhi}\n\n${formatStats}") @@ -411,7 +411,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val φ = phi(node) if (φ > 0 || cluster.failureDetector.isMonitoring(node)) { val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0 - phiByNode += node -> PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1, + phiByNode += node → PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1, math.max(previous.max, φ)) } } @@ -560,7 +560,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } def send(job: Job): Unit = { - outstanding += job.id -> JobState(Deadline.now + retryTimeout, job) + outstanding += job.id → JobState(Deadline.now + retryTimeout, job) sendCounter += 1 workers ! job } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala index 045552b..f6bc711 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala @@ -99,11 +99,11 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa Await.result(router ? 
GetRoutees, timeout.duration).asInstanceOf[Routees].routees def receiveReplies(expectedReplies: Int): Map[Address, Int] = { - val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) + val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0) (receiveWhile(5 seconds, messages = expectedReplies) { case Reply(address) ⇒ address }).foldLeft(zero) { - case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) + case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1)) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala index bebd5ce..1705bae 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala @@ -112,11 +112,11 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult lazy val router5 = system.actorOf(RoundRobinPool(nrOfInstances = 0).props(Props[SomeActor]), "router5") def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { - val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) + val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0) (receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) ⇒ fullAddress(ref) }).foldLeft(zero) { - case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) + case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1)) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala index f7732e6..3627387 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala @@ -63,11 +63,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp import akka.cluster.routing.UseRoleIgnoredMultiJvmSpec._ def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { - val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) + val zero = Map.empty[Address, Int] ++ roles.map(address(_) → 0) (receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) ⇒ fullAddress(ref) }).foldLeft(zero) { - case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) + case (replyMap, address) ⇒ replyMap + (address → (replyMap(address) + 1)) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala index 7e39a18..e802ec4 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala @@ -22,7 +22,7 @@ object AutoDownSpec { class AutoDownTestActor( autoDownUnreachableAfter: FiniteDuration, - probe: ActorRef) + probe: ActorRef) extends AutoDownBase(autoDownUnreachableAfter) { override def selfAddress = memberA.address diff --git a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala index 1f0de2f..b1e419a 100644 --- a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala @@ -94,7 +94,7 @@ class EWMASpec extends AkkaSpec(MetricsEnabledSpec.config) with 
MetricsCollector } else None } } - streamingDataSet ++= changes.map(m ⇒ m.name -> m) + streamingDataSet ++= changes.map(m ⇒ m.name → m) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala index c9c0b92..0089fb2 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala @@ -1,5 +1,4 @@ /* - * Copyright (C) 2009-2016 Lightbend Inc. */ diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala index 9e266b5..b0f0711 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala @@ -99,7 +99,7 @@ class ReachabilitySpec extends WordSpec with Matchers { Reachability.Record(nodeC, nodeB, Unreachable, 2), Reachability.Record(nodeA, nodeD, Unreachable, 3), Reachability.Record(nodeD, nodeB, Terminated, 4)) - val versions = Map(nodeA -> 3L, nodeC -> 3L, nodeD -> 4L) + val versions = Map(nodeA → 3L, nodeC → 3L, nodeD → 4L) val r = Reachability(records, versions) r.status(nodeA) should ===(Reachable) r.status(nodeB) should ===(Terminated) @@ -137,9 +137,9 @@ class ReachabilitySpec extends WordSpec with Matchers { r.allUnreachableFrom(nodeD) should ===(Set(nodeA, nodeB)) r.observersGroupedByUnreachable should ===(Map( - nodeA -> Set(nodeB, nodeC, nodeD), - nodeB -> Set(nodeD), - nodeE -> Set(nodeA))) + nodeA → Set(nodeB, nodeC, nodeD), + nodeB → Set(nodeD), + nodeE → Set(nodeA))) } "merge by picking latest version of each record" in { @@ -200,11 +200,11 @@ class ReachabilitySpec extends WordSpec with Matchers { } "merge versions correctly" in { - val r1 = Reachability(Vector.empty, Map(nodeA -> 3L, nodeB -> 5L, nodeC -> 7L)) - val r2 = Reachability(Vector.empty, Map(nodeA -> 6L, nodeB -> 2L, nodeD -> 1L)) + val r1 = Reachability(Vector.empty, Map(nodeA → 3L, nodeB → 5L, nodeC → 7L)) + val r2 = Reachability(Vector.empty, Map(nodeA → 6L, nodeB → 2L, nodeD → 1L)) val merged = r1.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r2) - val expected = Map(nodeA -> 6L, nodeB -> 5L, nodeC -> 7L, nodeD -> 1L) + val expected = Map(nodeA → 6L, nodeB → 5L, nodeC → 7L, nodeD → 1L) merged.versions should ===(expected) val merged2 = r2.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r1) diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala index b792081..d03a85d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala @@ -63,15 +63,15 @@ class MetricsSelectorSpec extends WordSpec with Matchers { "CapacityMetricsSelector" must { "calculate weights from capacity" in { - val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1) + val capacity = Map(a1 → 0.6, b1 → 0.3, c1 → 0.1) val weights = abstractSelector.weights(capacity) - weights should ===(Map(c1 -> 1, b1 -> 3, a1 -> 6)) + weights should ===(Map(c1 → 1, b1 → 3, a1 → 6)) } "handle low and zero capacity" in { - val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004) + val capacity = Map(a1 → 0.0, b1 → 1.0, c1 → 0.005, d1 → 0.004) val weights = abstractSelector.weights(capacity) - weights should ===(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0)) + weights should ===(Map(a1 → 0, b1 → 100, c1 
→ 1, d1 → 0)) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala index a5322a4..3927e9d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala @@ -31,7 +31,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" "WeightedRoutees" must { "allocate weighted routees" in { - val weights = Map(a1 -> 1, b1 -> 3, c1 -> 10) + val weights = Map(a1 → 1, b1 → 3, c1 → 10) val weighted = new WeightedRoutees(routees, a1, weights) weighted(1) should ===(routeeA) @@ -47,7 +47,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" empty.total } - val empty2 = new WeightedRoutees(Vector(routeeA), a1, Map(a1 -> 0)) + val empty2 = new WeightedRoutees(Vector(routeeA), a1, Map(a1 → 0)) empty2.isEmpty should ===(true) intercept[IllegalArgumentException] { empty2.total @@ -67,7 +67,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" } "allocate routees for undefined weight" in { - val weights = Map(a1 -> 1, b1 -> 7) + val weights = Map(a1 → 1, b1 → 7) val weighted = new WeightedRoutees(routees, a1, weights) weighted(1) should ===(routeeA) @@ -78,7 +78,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" } "allocate weighted local routees" in { - val weights = Map(a1 -> 2, b1 -> 1, c1 -> 10) + val weights = Map(a1 → 2, b1 → 1, c1 → 10) val routees2 = Vector(testActorRoutee, routeeB, routeeC) val weighted = new WeightedRoutees(routees2, a1, weights) @@ -87,7 +87,7 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" } "not allocate ref with weight zero" in { - val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10) + val weights = Map(a1 → 0, b1 → 2, c1 → 10) val weighted = new WeightedRoutees(routees, a1, weights) 1 to weighted.total foreach { weighted(_) should not be (routeeA) } diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala index 64e24e2..40f6848 100644 --- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala @@ -39,13 +39,13 @@ object CircuitBreakerProxy { * @param failureMap function to map a failure into a response message. 
The failing response message is wrapped * into a [[akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure]] object */ - def props(target: ActorRef, - maxFailures: Int, - callTimeout: Timeout, - resetTimeout: Timeout, + def props(target: ActorRef, + maxFailures: Int, + callTimeout: Timeout, + resetTimeout: Timeout, circuitEventListener: Option[ActorRef], - failureDetector: Any ⇒ Boolean, - failureMap: CircuitOpenFailure ⇒ Any) = + failureDetector: Any ⇒ Boolean, + failureMap: CircuitOpenFailure ⇒ Any) = Props(new CircuitBreakerProxy(target, maxFailures, callTimeout, resetTimeout, circuitEventListener, failureDetector, failureMap)) sealed trait CircuitBreakerCommand @@ -70,8 +70,8 @@ object CircuitBreakerProxy { final case class CircuitBreakerPropsBuilder( maxFailures: Int, callTimeout: Timeout, resetTimeout: Timeout, - circuitEventListener: Option[ActorRef] = None, - failureDetector: Any ⇒ Boolean = { _ ⇒ false }, + circuitEventListener: Option[ActorRef] = None, + failureDetector: Any ⇒ Boolean = { _ ⇒ false }, openCircuitFailureConverter: CircuitOpenFailure ⇒ Any = identity) { def withMaxFailures(value: Int) = copy(maxFailures = value) @@ -100,13 +100,13 @@ object CircuitBreakerProxy { import akka.contrib.circuitbreaker.CircuitBreakerProxy._ final class CircuitBreakerProxy( - target: ActorRef, - maxFailures: Int, - callTimeout: Timeout, - resetTimeout: Timeout, + target: ActorRef, + maxFailures: Int, + callTimeout: Timeout, + resetTimeout: Timeout, circuitEventListener: Option[ActorRef], - failureDetector: Any ⇒ Boolean, - failureMap: CircuitOpenFailure ⇒ Any) extends Actor with ActorLogging with FSM[CircuitBreakerState, CircuitBreakerStateData] { + failureDetector: Any ⇒ Boolean, + failureMap: CircuitOpenFailure ⇒ Any) extends Actor with ActorLogging with FSM[CircuitBreakerState, CircuitBreakerStateData] { import CircuitBreakerInternalEvents._ @@ -243,15 +243,15 @@ final class CircuitBreakerProxy( } onTransition { - case from -> Closed ⇒ + case from → Closed ⇒ log.debug("Moving from state {} to state CLOSED", from) circuitEventListener foreach { _ ! CircuitClosed(self) } - case from -> HalfOpen ⇒ + case from → HalfOpen ⇒ log.debug("Moving from state {} to state HALF OPEN", from) circuitEventListener foreach { _ ! CircuitHalfOpen(self) } - case from -> Open ⇒ + case from → Open ⇒ log.debug("Moving from state {} to state OPEN", from) circuitEventListener foreach { _ ! CircuitOpen(self) } } diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala index 30509b8..1a7c59d 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala @@ -67,8 +67,8 @@ trait ReceivePipeline extends Actor { } private def combinedDecorator: Receive ⇒ Receive = { receive ⇒ - // So that reconstructed Receive PF is undefined only when the actor's - // receive is undefined for a transformed message that reaches it... + // So that reconstructed Receive PF is undefined only when the actor's + // receive is undefined for a transformed message that reaches it... 
val innerReceiveHandler: Handler = { case msg ⇒ receive.lift(msg).map(_ ⇒ Done).getOrElse(Undefined) } diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala index dea049e..82abf18 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala @@ -284,9 +284,9 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, } onTransition { - case _ -> Active ⇒ scheduleTick() - case Active -> Idle ⇒ cancelTimer(resendTimer) - case _ -> Connecting ⇒ scheduleReconnectTick() + case _ → Active ⇒ scheduleTick() + case Active → Idle ⇒ cancelTimer(resendTimer) + case _ → Connecting ⇒ scheduleReconnectTick() } when(Active) { diff --git a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala index 51b204b..3f40842 100644 --- a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala +++ b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala @@ -109,9 +109,9 @@ private[throttle] object TimerBasedThrottler { final case class Message(message: Any, sender: ActorRef) // The data of the FSM - final case class Data(target: Option[ActorRef], + final case class Data(target: Option[ActorRef], callsLeftInThisPeriod: Int, - queue: Q[Message]) + queue: Q[Message]) } /** @@ -277,8 +277,8 @@ class TimerBasedThrottler(var rate: Rate) extends Actor with FSM[State, Data] { } onTransition { - case Idle -> Active ⇒ startTimer(rate) - case Active -> Idle ⇒ stopTimer() + case Idle → Active ⇒ startTimer(rate) + case Active → Idle ⇒ stopTimer() } initialize() diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala index 0831e5e..fc21c40 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala @@ -29,7 +29,7 @@ final case class GetCustomerAccountBalances(id: Long, accountTypes: Set[AccountT final case class GetAccountBalances(id: Long) final case class AccountBalances(accountType: AccountType, - balance: Option[List[(Long, BigDecimal)]]) + balance: Option[List[(Long, BigDecimal)]]) final case class CheckingAccountBalances(balances: Option[List[(Long, BigDecimal)]]) final case class SavingsAccountBalances(balances: Option[List[(Long, BigDecimal)]]) @@ -72,7 +72,7 @@ class AccountBalanceRetriever extends Actor with Aggregator { //#initial-expect class AccountAggregator(originalSender: ActorRef, - id: Long, types: Set[AccountType]) { + id: Long, types: Set[AccountType]) { val results = mutable.ArrayBuffer.empty[(AccountType, Option[List[(Long, BigDecimal)]])] @@ -97,7 +97,7 @@ class AccountBalanceRetriever extends Actor with Aggregator { context.actorOf(Props[CheckingAccountProxy]) ! GetAccountBalances(id) expectOnce { case CheckingAccountBalances(balances) ⇒ - results += (Checking -> balances) + results += (Checking → balances) collectBalances() } } @@ -107,7 +107,7 @@ class AccountBalanceRetriever extends Actor with Aggregator { context.actorOf(Props[SavingsAccountProxy]) ! 
GetAccountBalances(id) expectOnce { case SavingsAccountBalances(balances) ⇒ - results += (Savings -> balances) + results += (Savings → balances) collectBalances() } } @@ -116,7 +116,7 @@ class AccountBalanceRetriever extends Actor with Aggregator { context.actorOf(Props[MoneyMarketAccountProxy]) ! GetAccountBalances(id) expectOnce { case MoneyMarketAccountBalances(balances) ⇒ - results += (MoneyMarket -> balances) + results += (MoneyMarket → balances) collectBalances() } } diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala index 6035755..3e86ed3 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala @@ -246,7 +246,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit override def unhandled(message: Any) = probeRef ! message })) - // 11 ( -> not handled by EvenHalverInterceptor) -> 22 but > 10 so not handled in main receive: + // 11 ( -> not handled by EvenHalverInterceptor) -> 22 but > 10 so not handled in main receive: // original message falls back to unhandled implementation... replier ! 11 probe.expectMsg(11) @@ -409,10 +409,10 @@ object MixinSample extends App { //#mixin-model val texts = Map( - "that.rug_EN" -> "That rug really tied the room together.", - "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.", - "that.rug_ES" -> "Esa alfombra realmente completaba la sala.", - "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.") + "that.rug_EN" → "That rug really tied the room together.", + "your.opinion_EN" → "Yeah, well, you know, that's just, like, your opinion, man.", + "that.rug_ES" → "Esa alfombra realmente completaba la sala.", + "your.opinion_ES" → "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.") case class I18nText(locale: String, key: String) case class Message(author: Option[String], text: Any) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala index 2b13578..df8d346 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala @@ -83,8 +83,8 @@ final class GCounter private[akka] ( else state.get(key) match { case Some(v) ⇒ val tot = v + delta - assignAncestor(new GCounter(state + (key -> tot))) - case None ⇒ assignAncestor(new GCounter(state + (key -> delta))) + assignAncestor(new GCounter(state + (key → tot))) + case None ⇒ assignAncestor(new GCounter(state + (key → delta))) } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala index 6ca4c88..86ee1f5 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala @@ -47,7 +47,7 @@ final class LWWMap[A] private[akka] ( /** * Scala API: All entries of the map. */ - def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k -> r.value } + def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k → r.value } /** * Java API: All entries of the map. 
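Note on the `->` to `→` rewrites running through the hunks above and below: in expression position the two spellings are interchangeable, because scala.Predef.ArrowAssoc defines `→` as a Unicode alias of `->` (Scala 2.11/2.12; the alias was later deprecated in 2.13), so both build the same Tuple2 and the rewrite is purely cosmetic. A minimal standalone sketch, independent of this patch (the object name is illustrative):

    object ArrowEquivalence extends App {
      val ascii   = "a" -> 1           // Tuple2 via Predef.ArrowAssoc.->
      val unicode = "a" → 1            // same method, Unicode alias
      assert(ascii == unicode)         // both are ("a", 1)
      println(Map("x" → 1, "y" -> 2))  // the two spellings mix freely
    }

Pattern positions such as FSM's `onTransition { case from → Closed ⇒ ... }` are different: there an extractor object named `→` must be in scope, which is also why such tuple patterns can always be written `case (from, to)` instead.
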
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala index a4bf881..77eca28 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala @@ -93,8 +93,8 @@ object LWWRegister { @SerialVersionUID(1L) final class LWWRegister[A] private[akka] ( private[akka] val node: UniqueAddress, - val value: A, - val timestamp: Long) + val value: A, + val timestamp: Long) extends ReplicatedData with ReplicatedDataSerialization { import LWWRegister.{ Clock, defaultClock } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala index 66bc02d..2280ff2 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala @@ -33,7 +33,7 @@ object ORMap { */ @SerialVersionUID(1L) final class ORMap[A <: ReplicatedData] private[akka] ( - private[akka] val keys: ORSet[String], + private[akka] val keys: ORSet[String], private[akka] val values: Map[String, A]) extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala index 8f49a63..a742402 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala @@ -52,7 +52,7 @@ final class ORMultiMap[A] private[akka] (private[akka] val underlying: ORMap[ORS * Scala API: All entries of a multimap where keys are strings and values are sets. */ def entries: Map[String, Set[A]] = - underlying.entries.map { case (k, v) ⇒ k -> v.elements } + underlying.entries.map { case (k, v) ⇒ k → v.elements } /** * Java API: All entries of a multimap where keys are strings and values are sets. 
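The LWWRegister touched above carries a (node, value, timestamp) triple and resolves concurrent writes last-writer-wins by timestamp. A minimal illustrative merge under the usual assumptions (higher timestamp wins, node id breaks ties); this is a sketch only, not Akka's implementation, and the `Lww` class with integer node ids is a hypothetical stand-in:

    final case class Lww[A](node: Int, value: A, timestamp: Long) {
      def merge(that: Lww[A]): Lww[A] =
        if (timestamp > that.timestamp) this
        else if (timestamp < that.timestamp) that
        else if (node <= that.node) this else that // deterministic tie-break
    }

    // Both merge orders pick the same winner, which is what makes the
    // register converge as a CRDT:
    // Lww(1, "a", 10L) merge Lww(2, "b", 11L)  ==  Lww(2, "b", 11L)
    // Lww(2, "b", 11L) merge Lww(1, "a", 10L)  ==  Lww(2, "b", 11L)

The real LWWRegister additionally lets callers supply the Clock that produces timestamps (note the `import LWWRegister.{ Clock, defaultClock }` in the hunk above).
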
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala index 89fe4ed..6ed208e 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala @@ -201,7 +201,7 @@ object ORSet { @SerialVersionUID(1L) final class ORSet[A] private[akka] ( private[akka] val elementsMap: Map[A, ORSet.Dot], - private[akka] val vvector: VersionVector) + private[akka] val vvector: VersionVector) extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning with FastMerge { type T = ORSet[A] diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala index 999ce0c..fe90897 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala @@ -34,12 +34,12 @@ final class PNCounterMap private[akka] ( type T = PNCounterMap /** Scala API */ - def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k -> c.value } + def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k → c.value } /** Java API */ def getEntries: java.util.Map[String, BigInteger] = { import scala.collection.JavaConverters._ - underlying.entries.map { case (k, c) ⇒ k -> c.value.bigInteger }.asJava + underlying.entries.map { case (k, c) ⇒ k → c.value.bigInteger }.asJava } /** diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 8bc6771..184ef77 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -93,13 +93,13 @@ object ReplicatorSettings { * be configured to worst case in a healthy cluster. 
*/ final class ReplicatorSettings( - val role: Option[String], - val gossipInterval: FiniteDuration, + val role: Option[String], + val gossipInterval: FiniteDuration, val notifySubscribersInterval: FiniteDuration, - val maxDeltaElements: Int, - val dispatcher: String, - val pruningInterval: FiniteDuration, - val maxPruningDissemination: FiniteDuration) { + val maxDeltaElements: Int, + val dispatcher: String, + val pruningInterval: FiniteDuration, + val maxPruningDissemination: FiniteDuration) { def withRole(role: String): ReplicatorSettings = copy(role = ReplicatorSettings.roleOption(role)) @@ -126,13 +126,13 @@ final class ReplicatorSettings( copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination) private def copy( - role: Option[String] = role, - gossipInterval: FiniteDuration = gossipInterval, + role: Option[String] = role, + gossipInterval: FiniteDuration = gossipInterval, notifySubscribersInterval: FiniteDuration = notifySubscribersInterval, - maxDeltaElements: Int = maxDeltaElements, - dispatcher: String = dispatcher, - pruningInterval: FiniteDuration = pruningInterval, - maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings = + maxDeltaElements: Int = maxDeltaElements, + dispatcher: String = dispatcher, + pruningInterval: FiniteDuration = pruningInterval, + maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings = new ReplicatorSettings(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, maxPruningDissemination) } @@ -471,7 +471,7 @@ object Replicator { val NotFoundDigest: Digest = ByteString(-1) final case class DataEnvelope( - data: ReplicatedData, + data: ReplicatedData, pruning: Map[UniqueAddress, PruningState] = Map.empty) extends ReplicatorMessage { @@ -1048,14 +1048,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog if (keys.nonEmpty) { if (log.isDebugEnabled) log.debug("Sending gossip to [{}], containing [{}]", sender().path.address, keys.mkString(", ")) - val g = Gossip(keys.map(k ⇒ k -> getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty) + val g = Gossip(keys.map(k ⇒ k → getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty) sender() ! g } val myMissingKeys = otherKeys diff myKeys if (myMissingKeys.nonEmpty) { if (log.isDebugEnabled) log.debug("Sending gossip status to [{}], requesting missing [{}]", sender().path.address, myMissingKeys.mkString(", ")) - val status = Status(myMissingKeys.map(k ⇒ k -> NotFoundDigest)(collection.breakOut), chunk, totChunks) + val status = Status(myMissingKeys.map(k ⇒ k → NotFoundDigest)(collection.breakOut), chunk, totChunks) sender() ! 
status } } @@ -1305,12 +1305,12 @@ private[akka] abstract class ReadWriteAggregator extends Actor { */ private[akka] object WriteAggregator { def props( - key: KeyR, - envelope: Replicator.Internal.DataEnvelope, + key: KeyR, + envelope: Replicator.Internal.DataEnvelope, consistency: Replicator.WriteConsistency, - req: Option[Any], - nodes: Set[Address], - replyTo: ActorRef): Props = + req: Option[Any], + nodes: Set[Address], + replyTo: ActorRef): Props = Props(new WriteAggregator(key, envelope, consistency, req, nodes, replyTo)) .withDeploy(Deploy.local) } @@ -1319,12 +1319,12 @@ private[akka] object WriteAggregator { * INTERNAL API */ private[akka] class WriteAggregator( - key: KeyR, - envelope: Replicator.Internal.DataEnvelope, - consistency: Replicator.WriteConsistency, - req: Option[Any], + key: KeyR, + envelope: Replicator.Internal.DataEnvelope, + consistency: Replicator.WriteConsistency, + req: Option[Any], override val nodes: Set[Address], - replyTo: ActorRef) extends ReadWriteAggregator { + replyTo: ActorRef) extends ReadWriteAggregator { import Replicator._ import Replicator.Internal._ @@ -1384,12 +1384,12 @@ private[akka] class WriteAggregator( */ private[akka] object ReadAggregator { def props( - key: KeyR, + key: KeyR, consistency: Replicator.ReadConsistency, - req: Option[Any], - nodes: Set[Address], - localValue: Option[Replicator.Internal.DataEnvelope], - replyTo: ActorRef): Props = + req: Option[Any], + nodes: Set[Address], + localValue: Option[Replicator.Internal.DataEnvelope], + replyTo: ActorRef): Props = Props(new ReadAggregator(key, consistency, req, nodes, localValue, replyTo)) .withDeploy(Deploy.local) @@ -1399,12 +1399,12 @@ private[akka] object ReadAggregator { * INTERNAL API */ private[akka] class ReadAggregator( - key: KeyR, - consistency: Replicator.ReadConsistency, - req: Option[Any], + key: KeyR, + consistency: Replicator.ReadConsistency, + req: Option[Any], override val nodes: Set[Address], - localValue: Option[Replicator.Internal.DataEnvelope], - replyTo: ActorRef) extends ReadWriteAggregator { + localValue: Option[Replicator.Internal.DataEnvelope], + replyTo: ActorRef) extends ReadWriteAggregator { import Replicator._ import Replicator.Internal._ diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala index 99ee99b..e0a150c 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala @@ -262,7 +262,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L private[akka] override def increment(n: UniqueAddress): VersionVector = { val v = Timestamp.counter.getAndIncrement() if (n == node) copy(version = v) - else ManyVersionVector(TreeMap(node -> version, n -> v)) + else ManyVersionVector(TreeMap(node → version, n → v)) } /** INTERNAL API */ @@ -282,7 +282,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L that match { case OneVersionVector(n2, v2) ⇒ if (node == n2) if (version >= v2) this else OneVersionVector(n2, v2) - else ManyVersionVector(TreeMap(node -> version, n2 -> v2)) + else ManyVersionVector(TreeMap(node → version, n2 → v2)) case ManyVersionVector(vs2) ⇒ val v2 = vs2.getOrElse(node, Timestamp.Zero) val mergedVersions = diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala 
b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala index 41f9d31..eaf2b61 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala @@ -52,29 +52,29 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) private val VersionVectorManifest = "L" private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - GSetManifest -> gsetFromBinary, - ORSetManifest -> orsetFromBinary, - FlagManifest -> flagFromBinary, - LWWRegisterManifest -> lwwRegisterFromBinary, - GCounterManifest -> gcounterFromBinary, - PNCounterManifest -> pncounterFromBinary, - ORMapManifest -> ormapFromBinary, - LWWMapManifest -> lwwmapFromBinary, - PNCounterMapManifest -> pncountermapFromBinary, - ORMultiMapManifest -> multimapFromBinary, - DeletedDataManifest -> (_ ⇒ DeletedData), - VersionVectorManifest -> versionVectorFromBinary, - - GSetKeyManifest -> (bytes ⇒ GSetKey(keyIdFromBinary(bytes))), - ORSetKeyManifest -> (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))), - FlagKeyManifest -> (bytes ⇒ FlagKey(keyIdFromBinary(bytes))), - LWWRegisterKeyManifest -> (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))), - GCounterKeyManifest -> (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))), - PNCounterKeyManifest -> (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))), - ORMapKeyManifest -> (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))), - LWWMapKeyManifest -> (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))), - PNCounterMapKeyManifest -> (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))), - ORMultiMapKeyManifest -> (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes)))) + GSetManifest → gsetFromBinary, + ORSetManifest → orsetFromBinary, + FlagManifest → flagFromBinary, + LWWRegisterManifest → lwwRegisterFromBinary, + GCounterManifest → gcounterFromBinary, + PNCounterManifest → pncounterFromBinary, + ORMapManifest → ormapFromBinary, + LWWMapManifest → lwwmapFromBinary, + PNCounterMapManifest → pncountermapFromBinary, + ORMultiMapManifest → multimapFromBinary, + DeletedDataManifest → (_ ⇒ DeletedData), + VersionVectorManifest → versionVectorFromBinary, + + GSetKeyManifest → (bytes ⇒ GSetKey(keyIdFromBinary(bytes))), + ORSetKeyManifest → (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))), + FlagKeyManifest → (bytes ⇒ FlagKey(keyIdFromBinary(bytes))), + LWWRegisterKeyManifest → (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))), + GCounterKeyManifest → (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))), + PNCounterKeyManifest → (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))), + ORMapKeyManifest → (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))), + LWWMapKeyManifest → (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))), + PNCounterMapKeyManifest → (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))), + ORMultiMapKeyManifest → (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes)))) override def manifest(obj: AnyRef): String = obj match { case _: ORSet[_] ⇒ ORSetManifest @@ -284,7 +284,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def gcounterFromProto(gcounter: rd.GCounter): GCounter = { new GCounter(state = gcounter.getEntriesList.asScala.map(entry ⇒ - uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray))(breakOut)) + uniqueAddressFromProto(entry.getNode) → BigInt(entry.getValue.toByteArray))(breakOut)) } def pncounterToProto(pncounter: PNCounter): rd.PNCounter = @@ -322,7 +322,7 @@ class ReplicatedDataSerializer(val system: 
ExtendedActorSystem) VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion) else { val versions: TreeMap[UniqueAddress, Long] = versionVector.getEntriesList.asScala.map(entry ⇒ - uniqueAddressFromProto(entry.getNode) -> entry.getVersion)(breakOut) + uniqueAddressFromProto(entry.getNode) → entry.getVersion)(breakOut) VersionVector(versions) } } @@ -341,7 +341,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def ormapFromProto(ormap: rd.ORMap): ORMap[ReplicatedData] = { val entries = ormap.getEntriesList.asScala.map(entry ⇒ - entry.getKey -> otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap + entry.getKey → otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap new ORMap( keys = orsetFromProto(ormap.getKeys).asInstanceOf[ORSet[String]], entries) @@ -361,7 +361,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def lwwmapFromProto(lwwmap: rd.LWWMap): LWWMap[Any] = { val entries = lwwmap.getEntriesList.asScala.map(entry ⇒ - entry.getKey -> lwwRegisterFromProto(entry.getValue)).toMap + entry.getKey → lwwRegisterFromProto(entry.getValue)).toMap new LWWMap(new ORMap( keys = orsetFromProto(lwwmap.getKeys).asInstanceOf[ORSet[String]], entries)) @@ -381,7 +381,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def pncountermapFromProto(pncountermap: rd.PNCounterMap): PNCounterMap = { val entries = pncountermap.getEntriesList.asScala.map(entry ⇒ - entry.getKey -> pncounterFromProto(entry.getValue)).toMap + entry.getKey → pncounterFromProto(entry.getValue)).toMap new PNCounterMap(new ORMap( keys = orsetFromProto(pncountermap.getKeys).asInstanceOf[ORSet[String]], entries)) @@ -401,7 +401,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def multimapFromProto(multimap: rd.ORMultiMap): ORMultiMap[Any] = { val entries = multimap.getEntriesList.asScala.map(entry ⇒ - entry.getKey -> orsetFromProto(entry.getValue)).toMap + entry.getKey → orsetFromProto(entry.getValue)).toMap new ORMultiMap(new ORMap( keys = orsetFromProto(multimap.getKeys).asInstanceOf[ORSet[String]], entries)) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala index 42f46ec..f86645b 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -169,20 +169,20 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) val GossipManifest = "N" private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef]( - GetManifest -> getFromBinary, - GetSuccessManifest -> getSuccessFromBinary, - NotFoundManifest -> notFoundFromBinary, - GetFailureManifest -> getFailureFromBinary, - SubscribeManifest -> subscribeFromBinary, - UnsubscribeManifest -> unsubscribeFromBinary, - ChangedManifest -> changedFromBinary, - DataEnvelopeManifest -> dataEnvelopeFromBinary, - WriteManifest -> writeFromBinary, - WriteAckManifest -> (_ ⇒ WriteAck), - ReadManifest -> readFromBinary, - ReadResultManifest -> readResultFromBinary, - StatusManifest -> statusFromBinary, - GossipManifest -> gossipFromBinary) + GetManifest → getFromBinary, + GetSuccessManifest → getSuccessFromBinary, + NotFoundManifest → notFoundFromBinary, + GetFailureManifest → getFailureFromBinary, + 
SubscribeManifest → subscribeFromBinary, + UnsubscribeManifest → unsubscribeFromBinary, + ChangedManifest → changedFromBinary, + DataEnvelopeManifest → dataEnvelopeFromBinary, + WriteManifest → writeFromBinary, + WriteAckManifest → (_ ⇒ WriteAck), + ReadManifest → readFromBinary, + ReadResultManifest → readResultFromBinary, + StatusManifest → statusFromBinary, + GossipManifest → gossipFromBinary) override def manifest(obj: AnyRef): String = obj match { case _: DataEnvelope ⇒ DataEnvelopeManifest @@ -244,7 +244,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def statusFromBinary(bytes: Array[Byte]): Status = { val status = dm.Status.parseFrom(bytes) Status(status.getEntriesList.asScala.map(e ⇒ - e.getKey -> AkkaByteString(e.getDigest.toByteArray()))(breakOut), + e.getKey → AkkaByteString(e.getDigest.toByteArray()))(breakOut), status.getChunk, status.getTotChunks) } @@ -262,7 +262,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def gossipFromBinary(bytes: Array[Byte]): Gossip = { val gossip = dm.Gossip.parseFrom(decompress(bytes)) Gossip(gossip.getEntriesList.asScala.map(e ⇒ - e.getKey -> dataEnvelopeFromProto(e.getEnvelope))(breakOut), + e.getKey → dataEnvelopeFromProto(e.getEnvelope))(breakOut), sendBack = gossip.getSendBack) } @@ -408,7 +408,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) else PruningState.PruningInitialized(pruningEntry.getSeenList.asScala.map(addressFromProto)(breakOut)) val state = PruningState(uniqueAddressFromProto(pruningEntry.getOwnerAddress), phase) val removed = uniqueAddressFromProto(pruningEntry.getRemovedAddress) - removed -> state + removed → state }(breakOut) val data = otherMessageFromProto(dataEnvelope.getData).asInstanceOf[ReplicatedData] DataEnvelope(data, pruning) diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala index 50d02ea..b7a70f8 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala @@ -59,7 +59,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w // val totalCount = 2000 val expectedData = (0 until totalCount).toSet val data: Map[RoleName, Seq[Int]] = { - val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i -> n }.toMap + val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i → n }.toMap (0 until totalCount).groupBy(i ⇒ nodeIndex(i % nodeCount)) } lazy val myData: Seq[Int] = data(myself) diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala index 6954199..b24b185 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala @@ -146,7 +146,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST replicator ! 
Get(KeyC, ReadLocal) expectMsgPF() { case g @ GetSuccess(KeyC, _) ⇒ - g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L)) + g.get(KeyC).entries should be(Map("x" → 3L, "y" → 3L)) g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false) } } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala index f37b7ee..2c72c3c 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala @@ -526,22 +526,22 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec runOn(second) { replicator ! Subscribe(KeyH, changedProbe.ref) - replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = false))) - changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = false))) + replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = false))) + changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = false))) } enterBarrier("update-h1") runOn(first) { - replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = true))) + replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = true))) } runOn(second) { - changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = true))) + changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = true))) - replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" -> Flag(enabled = true))) + replicator ! 
Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" → Flag(enabled = true))) changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be( - Map("a" -> Flag(enabled = true), "b" -> Flag(enabled = true))) + Map("a" → Flag(enabled = true), "b" → Flag(enabled = true))) } enterBarrierAfterTestStep() diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala index 5ca79a3..d5e70cf 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala @@ -20,7 +20,7 @@ class LWWMapSpec extends WordSpec with Matchers { "be able to set entries" in { val m = LWWMap.empty[Int].put(node1, "a", 1, defaultClock[Int]).put(node2, "b", 2, defaultClock[Int]) - m.entries should be(Map("a" -> 1, "b" -> 2)) + m.entries should be(Map("a" → 1, "b" → 2)) } "be able to have its entries correctly merged with another LWWMap with other entries" in { @@ -28,7 +28,7 @@ class LWWMapSpec extends WordSpec with Matchers { val m2 = LWWMap.empty.put(node2, "c", 3, defaultClock[Int]) // merge both ways - val expected = Map("a" -> 1, "b" -> 2, "c" -> 3) + val expected = Map("a" → 1, "b" → 2, "c" → 3) (m1 merge m2).entries should be(expected) (m2 merge m1).entries should be(expected) } @@ -40,11 +40,11 @@ class LWWMapSpec extends WordSpec with Matchers { val merged1 = m1 merge m2 val m3 = merged1.remove(node1, "b") - (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 3)) + (merged1 merge m3).entries should be(Map("a" → 1, "c" → 3)) // but if there is a conflicting update the entry is not removed val m4 = merged1.put(node2, "b", 22, defaultClock[Int]) - (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3)) + (m3 merge m4).entries should be(Map("a" → 1, "b" → 22, "c" → 3)) } "have unapply extractor" in { @@ -55,7 +55,7 @@ class LWWMapSpec extends WordSpec with Matchers { case c @ Changed(LWWMapKey("key")) ⇒ val LWWMap(entries3) = c.dataValue val entries4: Map[String, Long] = entries3 - entries4 should be(Map("a" -> 1L)) + entries4 should be(Map("a" → 1L)) } } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala index 7b96a1c..8fb2922 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala @@ -197,7 +197,7 @@ class ORMapSpec extends WordSpec with Matchers { case c @ Changed(ORMapKey("key")) ⇒ val ORMap(entries3) = c.dataValue val entries4: Map[String, ReplicatedData] = entries3 - entries4 should be(Map("a" -> Flag(true), "b" -> Flag(false))) + entries4 should be(Map("a" → Flag(true), "b" → Flag(false))) } } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala index 38b6e93..e0d027e 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala @@ -17,20 +17,20 @@ class ORMultiMapSpec extends WordSpec with Matchers { "be able to add entries" in { val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B") - m.entries should be(Map("a" -> Set("A"), "b" -> Set("B"))) + m.entries should be(Map("a" → Set("A"), "b" → Set("B"))) val m2 = 
m.addBinding(node1, "a", "C") - m2.entries should be(Map("a" -> Set("A", "C"), "b" -> Set("B"))) + m2.entries should be(Map("a" → Set("A", "C"), "b" → Set("B"))) } "be able to remove entry" in { val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B").removeBinding(node1, "a", "A") - m.entries should be(Map("b" -> Set("B"))) + m.entries should be(Map("b" → Set("B"))) } "be able to replace an entry" in { val m = ORMultiMap().addBinding(node1, "a", "A").replaceBinding(node1, "a", "A", "B") - m.entries should be(Map("a" -> Set("B"))) + m.entries should be(Map("a" → Set("B"))) } "be able to have its entries correctly merged with another ORMultiMap with other entries" in { @@ -40,9 +40,9 @@ class ORMultiMapSpec extends WordSpec with Matchers { // merge both ways val expectedMerge = Map( - "a" -> Set("A"), - "b" -> Set("B"), - "c" -> Set("C")) + "a" → Set("A"), + "b" → Set("B"), + "c" → Set("C")) val merged1 = m1 merge m2 merged1.entries should be(expectedMerge) @@ -67,10 +67,10 @@ class ORMultiMapSpec extends WordSpec with Matchers { // merge both ways val expectedMerged = Map( - "a" -> Set("A2"), - "b" -> Set("B1"), - "c" -> Set("C2"), - "d" -> Set("D1", "D2")) + "a" → Set("A2"), + "b" → Set("B1"), + "c" → Set("C2"), + "d" → Set("D1", "D2")) val merged1 = m1 merge m2 merged1.entries should be(expectedMerged) @@ -89,8 +89,8 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m2 = m.put(node1, "a", a - "A1") val expectedMerged = Map( - "a" -> Set("A2"), - "b" -> Set("B1")) + "a" → Set("A2"), + "b" → Set("B1")) m2.entries should be(expectedMerged) } @@ -104,7 +104,7 @@ class ORMultiMapSpec extends WordSpec with Matchers { "remove all bindings for a given key" in { val m = ORMultiMap().addBinding(node1, "a", "A1").addBinding(node1, "a", "A2").addBinding(node1, "b", "B1") val m2 = m.remove(node1, "a") - m2.entries should be(Map("b" -> Set("B1"))) + m2.entries should be(Map("b" → Set("B1"))) } "have unapply extractor" in { @@ -116,7 +116,7 @@ class ORMultiMapSpec extends WordSpec with Matchers { case c @ Changed(ORMultiMapKey("key")) ⇒ val ORMultiMap(entries3) = c.dataValue val entries4: Map[String, Set[Long]] = entries3 - entries4 should be(Map("a" -> Set(1L, 2L), "b" -> Set(3L))) + entries4 should be(Map("a" → Set(1L, 2L), "b" → Set(3L))) } } } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala index 27078d6..b359c3d 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala @@ -229,30 +229,30 @@ class ORSetSpec extends WordSpec with Matchers { "ORSet unit test" must { "verify subtractDots" in { - val dot = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 2L, nodeD -> 14L, nodeG -> 22L)) - val vvector = VersionVector(TreeMap(nodeA -> 4L, nodeB -> 1L, nodeC -> 1L, nodeD -> 14L, nodeE -> 5L, nodeF -> 2L)) - val expected = VersionVector(TreeMap(nodeB -> 2L, nodeG -> 22L)) + val dot = VersionVector(TreeMap(nodeA → 3L, nodeB → 2L, nodeD → 14L, nodeG → 22L)) + val vvector = VersionVector(TreeMap(nodeA → 4L, nodeB → 1L, nodeC → 1L, nodeD → 14L, nodeE → 5L, nodeF → 2L)) + val expected = VersionVector(TreeMap(nodeB → 2L, nodeG → 22L)) ORSet.subtractDots(dot, vvector) should be(expected) } "verify mergeCommonKeys" in { val commonKeys: Set[String] = Set("K1", "K2") - val thisDot1 = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L)) - val thisDot2 = VersionVector(TreeMap(nodeB -> 
5L, nodeC -> 2L)) - val thisVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 5L, nodeC -> 2L, nodeD -> 7L)) + val thisDot1 = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L)) + val thisDot2 = VersionVector(TreeMap(nodeB → 5L, nodeC → 2L)) + val thisVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 5L, nodeC → 2L, nodeD → 7L)) val thisSet = new ORSet( - elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2), + elementsMap = Map("K1" → thisDot1, "K2" → thisDot2), vvector = thisVvector) val thatDot1 = VersionVector(nodeA, 3L) val thatDot2 = VersionVector(nodeB, 6L) - val thatVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 6L, nodeC -> 1L, nodeD -> 8L)) + val thatVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 6L, nodeC → 1L, nodeD → 8L)) val thatSet = new ORSet( - elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2), + elementsMap = Map("K1" → thatDot1, "K2" → thatDot2), vvector = thatVvector) val expectedDots = Map( - "K1" -> VersionVector(nodeA, 3L), - "K2" -> VersionVector(TreeMap(nodeB -> 6L, nodeC -> 2L))) + "K1" → VersionVector(nodeA, 3L), + "K2" → VersionVector(TreeMap(nodeB → 6L, nodeC → 2L))) ORSet.mergeCommonKeys(commonKeys, thisSet, thatSet) should be(expectedDots) } @@ -260,14 +260,14 @@ class ORSetSpec extends WordSpec with Matchers { "verify mergeDisjointKeys" in { val keys: Set[Any] = Set("K3", "K4", "K5") val elements: Map[Any, VersionVector] = Map( - "K3" -> VersionVector(nodeA, 4L), - "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)), - "K5" -> VersionVector(nodeA, 2L)) - val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L)) - val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L)) + "K3" → VersionVector(nodeA, 4L), + "K4" → VersionVector(TreeMap(nodeA → 3L, nodeD → 8L)), + "K5" → VersionVector(nodeA, 2L)) + val vvector = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L)) + val acc: Map[Any, VersionVector] = Map("K1" → VersionVector(nodeA, 3L)) val expectedDots = acc ++ Map( - "K3" -> VersionVector(nodeA, 4L), - "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen + "K3" → VersionVector(nodeA, 4L), + "K4" → VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should be(expectedDots) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala index 3b621f1..fc5234a 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala @@ -19,7 +19,7 @@ class PNCounterMapSpec extends WordSpec with Matchers { "be able to increment and decrement entries" in { val m = PNCounterMap().increment(node1, "a", 2).increment(node1, "b", 3).decrement(node2, "a", 1) - m.entries should be(Map("a" -> 1, "b" -> 3)) + m.entries should be(Map("a" → 1, "b" → 3)) } "be able to have its entries correctly merged with another ORMap with other entries" in { @@ -27,7 +27,7 @@ class PNCounterMapSpec extends WordSpec with Matchers { val m2 = PNCounterMap().increment(node2, "c", 5) // merge both ways - val expected = Map("a" -> 1, "b" -> 3, "c" -> 7) + val expected = Map("a" → 1, "b" → 3, "c" → 7) (m1 merge m2).entries should be(expected) (m2 merge m1).entries should be(expected) } @@ -39,11 +39,11 @@ class PNCounterMapSpec extends WordSpec with Matchers { val merged1 = m1 merge m2 val m3 = merged1.remove(node1, "b") - 
(merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 7)) + (merged1 merge m3).entries should be(Map("a" → 1, "c" → 7)) // but if there is a conflicting update the entry is not removed val m4 = merged1.increment(node2, "b", 10) - (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) + (m3 merge m4).entries should be(Map("a" → 1, "b" → 13, "c" → 7)) } "have unapply extractor" in { @@ -54,7 +54,7 @@ class PNCounterMapSpec extends WordSpec with Matchers { case c @ Changed(PNCounterMapKey("key")) ⇒ val PNCounterMap(entries3) = c.dataValue val entries4: Map[String, BigInt] = entries3 - entries4 should be(Map("a" -> 1L, "b" -> 2L)) + entries4 should be(Map("a" → 1L, "b" → 2L)) } } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala index 98e0776..5ce047d 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala @@ -68,7 +68,7 @@ class WriteAggregatorSpec extends AkkaSpec(""" val writeMajority = WriteMajority(timeout) def probes(probe: ActorRef): Map[Address, ActorRef] = - nodes.toSeq.map(_ -> system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap + nodes.toSeq.map(_ → system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap "WriteAggregator" must { "send to at least N/2+1 replicas when WriteMajority" in { diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala index a622d6a..ebf6011 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala @@ -64,17 +64,17 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes checkSerialization(Changed(keyA)(data1)) checkSerialization(DataEnvelope(data1)) checkSerialization(DataEnvelope(data1, pruning = Map( - address1 -> PruningState(address2, PruningPerformed), - address3 -> PruningState(address2, PruningInitialized(Set(address1.address)))))) + address1 → PruningState(address2, PruningPerformed), + address3 → PruningState(address2, PruningInitialized(Set(address1.address)))))) checkSerialization(Write("A", DataEnvelope(data1))) checkSerialization(WriteAck) checkSerialization(Read("A")) checkSerialization(ReadResult(Some(DataEnvelope(data1)))) checkSerialization(ReadResult(None)) - checkSerialization(Status(Map("A" -> ByteString.fromString("a"), - "B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10)) - checkSerialization(Gossip(Map("A" -> DataEnvelope(data1), - "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true)) + checkSerialization(Status(Map("A" → ByteString.fromString("a"), + "B" → ByteString.fromString("b")), chunk = 3, totChunks = 10)) + checkSerialization(Gossip(Map("A" → DataEnvelope(data1), + "B" → DataEnvelope(GSet() + "b" + "c")), sendBack = true)) } } @@ -141,7 +141,7 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes "handle Int wrap around" ignore { // ignored because it takes 20 seconds (but it works) val cache = new SmallCache[Read, String](2, 5.seconds, _ ⇒ null) val a = Read("a") - val x = a -> "A" + val x = a → "A" var n = 0 while (n <= 
Int.MaxValue - 3) { cache.add(x) diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala index 179d1c8..af97199 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/OutgoingConnectionBlueprint.scala @@ -31,7 +31,6 @@ private[http] object OutgoingConnectionBlueprint { /* Stream Setup ============ - requestIn +----------+ +-----------------------------------------------+--->| Termi- | requestRendering | | nation +---------------------> | @@ -46,8 +45,8 @@ private[http] object OutgoingConnectionBlueprint { +------------+ */ def apply(hostHeader: Host, - settings: ClientConnectionSettings, - log: LoggingAdapter): Http.ClientLayer = { + settings: ClientConnectionSettings, + log: LoggingAdapter): Http.ClientLayer = { import settings._ // the initial header parser we initially use for every connection, diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala index 7a87a98..fb9d2b9 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolConductor.scala @@ -21,9 +21,9 @@ private object PoolConductor { import PoolSlot.{ RawSlotEvent, SlotEvent } case class Ports( - requestIn: Inlet[RequestContext], + requestIn: Inlet[RequestContext], slotEventIn: Inlet[RawSlotEvent], - slotOuts: immutable.Seq[Outlet[RequestContext]]) extends Shape { + slotOuts: immutable.Seq[Outlet[RequestContext]]) extends Shape { override val inlets = requestIn :: slotEventIn :: Nil override def outlets = slotOuts @@ -61,7 +61,6 @@ private object PoolConductor { +-------------+  retry |<-------- RawSlotEvent (from slotEventMerge) |  Split  | +---------+ - */ def apply(slotCount: Int, pipeliningLimit: Int, log: LoggingAdapter): Graph[Ports, Any] = GraphDSL.create() { implicit b ⇒ @@ -194,7 +193,7 @@ private object PoolConductor { @tailrec def bestSlot(ix: Int = 0, bestIx: Int = -1, bestState: SlotState = Busy): Int = if (ix < slotStates.length) { val pl = pipeliningLimit - slotStates(ix) -> bestState match { + slotStates(ix) → bestState match { case (Idle, _) ⇒ ix case (Unconnected, Loaded(_) | Busy) ⇒ bestSlot(ix + 1, ix, Unconnected) case (x @ Loaded(a), Loaded(b)) if a < b ⇒ bestSlot(ix + 1, ix, x) diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala index 5f65b3c..483ab0e 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolFlow.scala @@ -50,27 +50,24 @@ private object PoolFlow {              |  Merge  | <-------------------+     |                                                |           | <-------------------------+                                   +-----------+ - Conductor: - Maintains slot state overview by running a simple state machine per Connection Slot - Decides which slot will receive the next request from upstream according to current slot state and dispatch configuration - Forwards demand from selected slot to upstream - Always maintains demand for SlotEvents from the Connection Slots - Implemented as a sub-graph - Connection 
Slot: - Wraps a low-level outgoing connection flow and (re-)materializes and uses it whenever necessary - Directly forwards demand from the underlying connection to the Conductor - Dispatches SlotEvents to the Conductor (via the SlotEventMerge) - Implemented as a sub-graph - Response Merge: - Simple merge of the Connection Slots' outputs - */ def apply(connectionFlow: Flow[HttpRequest, HttpResponse, Future[Http.OutgoingConnection]], - remoteAddress: InetSocketAddress, settings: ConnectionPoolSettings, log: LoggingAdapter)( - implicit system: ActorSystem, fm: Materializer): Flow[RequestContext, ResponseContext, NotUsed] = + remoteAddress: InetSocketAddress, settings: ConnectionPoolSettings, log: LoggingAdapter)( + implicit + system: ActorSystem, fm: Materializer): Flow[RequestContext, ResponseContext, NotUsed] = Flow.fromGraph(GraphDSL.create[FlowShape[RequestContext, ResponseContext]]() { implicit b ⇒ import settings._ import GraphDSL.Implicits._ diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolGateway.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolGateway.scala index 91490e9..fc79c92 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolGateway.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolGateway.scala @@ -18,8 +18,8 @@ import akka.stream.Materializer private object PoolGateway { sealed trait State - final case class Running(interfaceActorRef: ActorRef, - shutdownStartedPromise: Promise[Done], + final case class Running(interfaceActorRef: ActorRef, + shutdownStartedPromise: Promise[Done], shutdownCompletedPromise: Promise[Done]) extends State final case class IsShutdown(shutdownCompleted: Future[Done]) extends State final case class NewIncarnation(gatewayFuture: Future[PoolGateway]) extends State @@ -38,9 +38,10 @@ private object PoolGateway { * Removal of cache entries for terminated pools is also supported, because old gateway references that * get reused will automatically forward requests directed at them to the latest pool incarnation from the cache. */ -private[http] class PoolGateway(hcps: HostConnectionPoolSetup, +private[http] class PoolGateway(hcps: HostConnectionPoolSetup, _shutdownStartedPromise: Promise[Done])( // constructor arg only - implicit system: ActorSystem, fm: Materializer) { + implicit + system: ActorSystem, fm: Materializer) { import PoolGateway._ import fm.executionContext diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolInterfaceActor.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolInterfaceActor.scala index c2851a5..9e2d3f3 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolInterfaceActor.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolInterfaceActor.scala @@ -46,9 +46,9 @@ private object PoolInterfaceActor { * To the inside (i.e. the running connection pool flow) the gateway actor acts as request source * (ActorPublisher) and response sink (ActorSubscriber). 
*/ -private class PoolInterfaceActor(hcps: HostConnectionPoolSetup, +private class PoolInterfaceActor(hcps: HostConnectionPoolSetup, shutdownCompletedPromise: Promise[Done], - gateway: PoolGateway)(implicit fm: Materializer) + gateway: PoolGateway)(implicit fm: Materializer) extends ActorSubscriber with ActorPublisher[RequestContext] with ActorLogging { import PoolInterfaceActor._ diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala index d866f04..f6eb214 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/client/PoolSlot.scala @@ -38,7 +38,6 @@ private object PoolSlot { /* Stream Setup ============ - Request-  +-----------+  +-------------+  +-------------+    +------------+ Context  | Slot-     |  List[ | flatten | Processor-  | doubler |    | SlotEvent- |  Response- +--------->| Processor +------------->| (MapConcat) +------------->| (MapConcat) +---->| Split      +-------------> @@ -51,8 +50,8 @@ private object PoolSlot { */ def apply(slotIx: Int, connectionFlow: Flow[HttpRequest, HttpResponse, Any], remoteAddress: InetSocketAddress, // TODO: remove after #16168 is cleared - settings: ConnectionPoolSettings)(implicit system: ActorSystem, - fm: Materializer): Graph[FanOutShape2[RequestContext, ResponseContext, RawSlotEvent], Any] = + settings: ConnectionPoolSettings)(implicit system: ActorSystem, + fm: Materializer): Graph[FanOutShape2[RequestContext, ResponseContext, RawSlotEvent], Any] = GraphDSL.create() { implicit b ⇒ import GraphDSL.Implicits._ diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala index 65f2106..732592e 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/BodyPartParser.scala @@ -23,9 +23,9 @@ import akka.stream.impl.fusing.SubSource * see: http://tools.ietf.org/html/rfc2046#section-5.1.1 */ private[http] final class BodyPartParser(defaultContentType: ContentType, - boundary: String, - log: LoggingAdapter, - settings: BodyPartParser.Settings) + boundary: String, + log: LoggingAdapter, + settings: BodyPartParser.Settings) extends PushPullStage[ByteString, BodyPartParser.Output] { import BodyPartParser._ import settings._ diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala index 2bbcb32..13bffd5 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpHeaderParser.scala @@ -59,15 +59,15 @@ import akka.http.impl.model.parser.CharacterClasses._ * cannot hold more then 255 items, so this array has a fixed size of 255. 
*/ private[engine] final class HttpHeaderParser private ( - val settings: HttpHeaderParser.Settings, - onIllegalHeader: ErrorInfo ⇒ Unit, - private[this] var nodes: Array[Char] = new Array(512), // initial size, can grow as needed - private[this] var nodeCount: Int = 0, - private[this] var branchData: Array[Short] = new Array(254 * 3), - private[this] var branchDataCount: Int = 0, - private[this] var values: Array[AnyRef] = new Array(255), // fixed size of 255 - private[this] var valueCount: Int = 0, - private[this] var trieIsPrivate: Boolean = false) { // signals the trie data can be mutated w/o having to copy first + val settings: HttpHeaderParser.Settings, + onIllegalHeader: ErrorInfo ⇒ Unit, + private[this] var nodes: Array[Char] = new Array(512), // initial size, can grow as needed + private[this] var nodeCount: Int = 0, + private[this] var branchData: Array[Short] = new Array(254 * 3), + private[this] var branchDataCount: Int = 0, + private[this] var values: Array[AnyRef] = new Array(255), // fixed size of 255 + private[this] var valueCount: Int = 0, + private[this] var trieIsPrivate: Boolean = false) { // signals the trie data can be mutated w/o having to copy first // TODO: evaluate whether switching to a value-class-based approach allows us to improve code readability without sacrificing performance @@ -300,7 +300,7 @@ private[engine] final class HttpHeaderParser private ( val prefixedLines = lines.zipWithIndex map { case (line, ix) ⇒ (if (ix < mainIx) p1 else if (ix > mainIx) p3 else p2) :: line } - prefixedLines -> mainIx + prefixedLines → mainIx } def branchLines(dataIx: Int, p1: String, p2: String, p3: String) = branchData(dataIx) match { case 0 ⇒ Seq.empty @@ -315,9 +315,9 @@ private[engine] final class HttpHeaderParser private ( case ValueBranch(_, valueParser, branchRootNodeIx, _) ⇒ val pad = " " * (valueParser.headerName.length + 3) recurseAndPrefixLines(branchRootNodeIx, pad, "(" + valueParser.headerName + ")-", pad) - case vp: HeaderValueParser ⇒ Seq(" (" :: vp.headerName :: ")" :: Nil) -> 0 - case value: RawHeader ⇒ Seq(" *" :: value.toString :: Nil) -> 0 - case value ⇒ Seq(" " :: value.toString :: Nil) -> 0 + case vp: HeaderValueParser ⇒ Seq(" (" :: vp.headerName :: ")" :: Nil) → 0 + case value: RawHeader ⇒ Seq(" *" :: value.toString :: Nil) → 0 + case value ⇒ Seq(" " :: value.toString :: Nil) → 0 } case nodeChar ⇒ val rix = rowIx(msb) @@ -350,7 +350,7 @@ private[engine] final class HttpHeaderParser private ( node >>> 8 match { case 0 ⇒ build(nodeIx + 1) case msb if (node & 0xFF) == 0 ⇒ values(msb - 1) match { - case ValueBranch(_, parser, _, count) ⇒ Map(parser.headerName -> count) + case ValueBranch(_, parser, _, count) ⇒ Map(parser.headerName → count) case _ ⇒ Map.empty } case msb ⇒ @@ -481,7 +481,7 @@ private[http] object HttpHeaderParser { onIllegalHeader(error.withSummaryPrepended(s"Illegal '$headerName' header")) RawHeader(headerName, trimmedHeaderValue) } - header -> endIx + header → endIx } } @@ -489,7 +489,7 @@ private[http] object HttpHeaderParser { extends HeaderValueParser(headerName, maxValueCount) { def apply(hhp: HttpHeaderParser, input: ByteString, valueStart: Int, onIllegalHeader: ErrorInfo ⇒ Unit): (HttpHeader, Int) = { val (headerValue, endIx) = scanHeaderValue(hhp, input, valueStart, valueStart + maxHeaderValueLength + 2)() - RawHeader(headerName, headerValue.trim) -> endIx + RawHeader(headerName, headerValue.trim) → endIx } } diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala 
b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala index f54df29..64b0e6f 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpMessageParser.scala @@ -23,7 +23,7 @@ import ParserOutput._ /** * INTERNAL API */ -private[http] abstract class HttpMessageParser[Output >: MessageOutput <: ParserOutput](val settings: ParserSettings, +private[http] abstract class HttpMessageParser[Output >: MessageOutput <: ParserOutput](val settings: ParserSettings, val headerParser: HttpHeaderParser) { self ⇒ import HttpMessageParser._ import settings._ @@ -180,7 +180,7 @@ private[http] abstract class HttpMessageParser[Output >: MessageOutput <: Parser expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult def parseFixedLengthBody(remainingBodyBytes: Long, - isLastMessage: Boolean)(input: ByteString, bodyStart: Int): StateResult = { + isLastMessage: Boolean)(input: ByteString, bodyStart: Int): StateResult = { val remainingInputBytes = input.length - bodyStart if (remainingInputBytes > 0) { if (remainingInputBytes < remainingBodyBytes) { diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala index 4ca75b3..75a7ae6 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpRequestParser.scala @@ -18,9 +18,9 @@ import ParserOutput._ /** * INTERNAL API */ -private[http] class HttpRequestParser(_settings: ParserSettings, +private[http] class HttpRequestParser(_settings: ParserSettings, rawRequestUriHeader: Boolean, - _headerParser: HttpHeaderParser) + _headerParser: HttpHeaderParser) extends HttpMessageParser[RequestOutput](_settings, _headerParser) { import HttpMessageParser._ import settings._ @@ -114,7 +114,7 @@ private[http] class HttpRequestParser(_settings: ParserSettings, expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult = if (hostHeaderPresent || protocol == HttpProtocols.`HTTP/1.0`) { def emitRequestStart(createEntity: EntityCreator[RequestOutput, RequestEntity], - headers: List[HttpHeader] = headers) = { + headers: List[HttpHeader] = headers) = { val allHeaders0 = if (rawRequestUriHeader) `Raw-Request-URI`(new String(uriBytes, HttpCharsets.`US-ASCII`.nioCharset)) :: headers else headers diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala index 160f001..983587a 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/HttpResponseParser.scala @@ -83,7 +83,7 @@ private[http] class HttpResponseParser(_settings: ParserSettings, _headerParser: clh: Option[`Content-Length`], cth: Option[`Content-Type`], teh: Option[`Transfer-Encoding`], expect100continue: Boolean, hostHeaderPresent: Boolean, closeAfterResponseCompletion: Boolean): StateResult = { def emitResponseStart(createEntity: EntityCreator[ResponseOutput, ResponseEntity], - headers: List[HttpHeader] = headers) = + headers: List[HttpHeader] = headers) = emit(ResponseStart(statusCode, protocol, headers, createEntity, 
closeAfterResponseCompletion)) def finishEmptyResponse() = { emitResponseStart(emptyEntity(cth)) diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala index 0530010..278c712 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/ParserOutput.scala @@ -26,19 +26,19 @@ private[http] object ParserOutput { sealed trait ErrorOutput extends MessageOutput final case class RequestStart( - method: HttpMethod, - uri: Uri, - protocol: HttpProtocol, - headers: List[HttpHeader], - createEntity: EntityCreator[RequestOutput, RequestEntity], + method: HttpMethod, + uri: Uri, + protocol: HttpProtocol, + headers: List[HttpHeader], + createEntity: EntityCreator[RequestOutput, RequestEntity], expect100Continue: Boolean, - closeRequested: Boolean) extends MessageStart with RequestOutput + closeRequested: Boolean) extends MessageStart with RequestOutput final case class ResponseStart( - statusCode: StatusCode, - protocol: HttpProtocol, - headers: List[HttpHeader], - createEntity: EntityCreator[ResponseOutput, ResponseEntity], + statusCode: StatusCode, + protocol: HttpProtocol, + headers: List[HttpHeader], + createEntity: EntityCreator[ResponseOutput, ResponseEntity], closeRequested: Boolean) extends MessageStart with ResponseOutput case object MessageEnd extends MessageOutput diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala index 449f778..7d8cd3e 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/parsing/package.scala @@ -52,7 +52,7 @@ package parsing { * INTERNAL API */ private[parsing] class ParsingException(val status: StatusCode, - val info: ErrorInfo) extends RuntimeException(info.formatPretty) { + val info: ErrorInfo) extends RuntimeException(info.formatPretty) { def this(status: StatusCode, summary: String = "") = this(status, ErrorInfo(if (summary.isEmpty) status.defaultMessage else summary)) def this(summary: String) = diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala index 7f34106..d746fe3 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/BodyPartRenderer.scala @@ -25,10 +25,10 @@ import scala.concurrent.forkjoin.ThreadLocalRandom */ private[http] object BodyPartRenderer { - def streamed(boundary: String, - nioCharset: Charset, + def streamed(boundary: String, + nioCharset: Charset, partHeadersSizeHint: Int, - log: LoggingAdapter): PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] = + log: LoggingAdapter): PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] = new PushPullStage[Multipart.BodyPart, Source[ChunkStreamPart, Any]] { var firstBoundaryRendered = false diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala index fadfb89..b521ee9 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala +++ 
b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpRequestRendererFactory.scala @@ -19,9 +19,9 @@ import headers._ /** * INTERNAL API */ -private[http] class HttpRequestRendererFactory(userAgentHeader: Option[headers.`User-Agent`], +private[http] class HttpRequestRendererFactory(userAgentHeader: Option[headers.`User-Agent`], requestHeaderSizeHint: Int, - log: LoggingAdapter) { + log: LoggingAdapter) { import HttpRequestRendererFactory.RequestRenderingOutput def renderToSource(ctx: RequestRenderingContext): Source[ByteString, Any] = render(ctx).byteStream diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala index e3e46eb..a046f5e 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/HttpResponseRendererFactory.scala @@ -23,9 +23,9 @@ import headers._ /** * INTERNAL API */ -private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Server], +private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Server], responseHeaderSizeHint: Int, - log: LoggingAdapter) { + log: LoggingAdapter) { private val renderDefaultServerHeader: Rendering ⇒ Unit = serverHeader match { @@ -46,7 +46,7 @@ private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Ser val r = new ByteArrayRendering(48) DateTime(now).renderRfc1123DateTimeString(r ~~ headers.Date) ~~ CrLf cachedBytes = r.get - cachedDateHeader = cachedSeconds -> cachedBytes + cachedDateHeader = cachedSeconds → cachedBytes } cachedBytes } @@ -275,10 +275,10 @@ private[http] class HttpResponseRendererFactory(serverHeader: Option[headers.Ser * INTERNAL API */ private[http] final case class ResponseRenderingContext( - response: HttpResponse, - requestMethod: HttpMethod = HttpMethods.GET, + response: HttpResponse, + requestMethod: HttpMethod = HttpMethods.GET, requestProtocol: HttpProtocol = HttpProtocols.`HTTP/1.1`, - closeRequested: Boolean = false) + closeRequested: Boolean = false) /** INTERNAL API */ private[http] sealed trait ResponseRenderingOutput diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala index b624ef3..2388c54 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/rendering/RenderSupport.scala @@ -31,11 +31,10 @@ private object RenderSupport { val defaultLastChunkBytes: ByteString = renderChunk(HttpEntity.LastChunk) def CancelSecond[T, Mat](first: Source[T, Mat], second: Source[T, Any]): Source[T, Mat] = { - Source.fromGraph(GraphDSL.create(first) { implicit b ⇒ - frst ⇒ - import GraphDSL.Implicits._ - second ~> Sink.cancelled - SourceShape(frst.out) + Source.fromGraph(GraphDSL.create(first) { implicit b ⇒ frst ⇒ + import GraphDSL.Implicits._ + second ~> Sink.cancelled + SourceShape(frst.out) }) } diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala index 868b6be..46d76eb 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala +++ 
b/akka-http-core/src/main/scala/akka/http/impl/engine/server/HttpServerBluePrint.scala @@ -156,7 +156,7 @@ private[http] object HttpServerBluePrint { case StreamedEntityCreator(creator) ⇒ streamRequestEntity(creator) } - def streamRequestEntity(creator: (Source[ParserOutput.RequestOutput, NotUsed]) => RequestEntity): RequestEntity = { + def streamRequestEntity(creator: (Source[ParserOutput.RequestOutput, NotUsed]) ⇒ RequestEntity): RequestEntity = { // stream incoming chunks into the request entity until we reach the end of it // and then toggle back to "idle" @@ -252,7 +252,7 @@ private[http] object HttpServerBluePrint { } class RequestTimeoutSupport(initialTimeout: FiniteDuration) - extends GraphStage[BidiShape[HttpRequest, HttpRequest, HttpResponse, HttpResponse]] { + extends GraphStage[BidiShape[HttpRequest, HttpRequest, HttpResponse, HttpResponse]] { private val requestIn = Inlet[HttpRequest]("requestIn") private val requestOut = Outlet[HttpRequest]("requestOut") private val responseIn = Inlet[HttpResponse]("responseIn") @@ -301,14 +301,14 @@ private[http] object HttpServerBluePrint { } } - private class TimeoutSetup(val timeoutBase: Deadline, + private class TimeoutSetup(val timeoutBase: Deadline, val scheduledTask: Cancellable, - val timeout: Duration, - val handler: HttpRequest ⇒ HttpResponse) + val timeout: Duration, + val handler: HttpRequest ⇒ HttpResponse) private class TimeoutAccessImpl(request: HttpRequest, initialTimeout: FiniteDuration, requestEnd: Future[Unit], trigger: AsyncCallback[(TimeoutAccess, HttpResponse)], materializer: Materializer) - extends AtomicReference[Future[TimeoutSetup]] with TimeoutAccess with (HttpRequest ⇒ HttpResponse) { self ⇒ + extends AtomicReference[Future[TimeoutSetup]] with TimeoutAccess with (HttpRequest ⇒ HttpResponse) { self ⇒ import materializer.executionContext set { @@ -350,7 +350,7 @@ private[http] object HttpServerBluePrint { } class ControllerStage(settings: ServerSettings, log: LoggingAdapter) - extends GraphStage[BidiShape[RequestOutput, RequestOutput, HttpResponse, ResponseRenderingContext]] { + extends GraphStage[BidiShape[RequestOutput, RequestOutput, HttpResponse, ResponseRenderingContext]] { private val requestParsingIn = Inlet[RequestOutput]("requestParsingIn") private val requestPrepOut = Outlet[RequestOutput]("requestPrepOut") private val httpResponseIn = Inlet[HttpResponse]("httpResponseIn") @@ -532,7 +532,7 @@ private[http] object HttpServerBluePrint { One2OneBidiFlow[HttpRequest, HttpResponse](pipeliningLimit).reversed private class ProtocolSwitchStage(settings: ServerSettings, log: LoggingAdapter) - extends GraphStage[BidiShape[ResponseRenderingOutput, ByteString, SessionBytes, SessionBytes]] { + extends GraphStage[BidiShape[ResponseRenderingOutput, ByteString, SessionBytes, SessionBytes]] { private val fromNet = Inlet[SessionBytes]("fromNet") private val toNet = Outlet[ByteString]("toNet") diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala index 6282cd8..07df1e5 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/FrameEvent.scala @@ -42,22 +42,22 @@ private[http] final case class FrameData(data: ByteString, lastPart: Boolean) ex /** Model of the frame header */ private[http] final case class FrameHeader(opcode: Protocol.Opcode, - mask: Option[Int], + mask: Option[Int], length: Long, - fin: Boolean, - rsv1: Boolean = false, 
- rsv2: Boolean = false, - rsv3: Boolean = false) + fin: Boolean, + rsv1: Boolean = false, + rsv2: Boolean = false, + rsv3: Boolean = false) private[http] object FrameEvent { def empty(opcode: Protocol.Opcode, - fin: Boolean, - rsv1: Boolean = false, - rsv2: Boolean = false, - rsv3: Boolean = false): FrameStart = + fin: Boolean, + rsv1: Boolean = false, + rsv2: Boolean = false, + rsv3: Boolean = false): FrameStart = fullFrame(opcode, None, ByteString.empty, fin, rsv1, rsv2, rsv3) def fullFrame(opcode: Protocol.Opcode, mask: Option[Int], data: ByteString, - fin: Boolean, + fin: Boolean, rsv1: Boolean = false, rsv2: Boolean = false, rsv3: Boolean = false): FrameStart = diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala index 5a9e0ba..e9eb7be 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/Handshake.scala @@ -109,16 +109,12 @@ private[http] object Handshake { /* From: http://tools.ietf.org/html/rfc6455#section-4.2.2 - 1. A Status-Line with a 101 response code as per RFC 2616 [RFC2616]. Such a response could look like "HTTP/1.1 101 Switching Protocols". - 2. An |Upgrade| header field with value "websocket" as per RFC 2616 [RFC2616]. - 3. A |Connection| header field with value "Upgrade". - 4. A |Sec-WebSocket-Accept| header field. The value of this header field is constructed by concatenating /key/, defined above in step 4 in Section 4.2.2, with the string "258EAFA5- @@ -168,24 +164,20 @@ private[http] object Handshake { def validateResponse(response: HttpResponse, subprotocols: Seq[String], key: `Sec-WebSocket-Key`): Either[String, NegotiatedWebSocketSettings] = { /* From http://tools.ietf.org/html/rfc6455#section-4.1 - 1. If the status code received from the server is not 101, the client handles the response per HTTP [RFC2616] procedures. In particular, the client might perform authentication if it receives a 401 status code; the server might redirect the client using a 3xx status code (but clients are not required to follow them), etc. Otherwise, proceed as follows. - 2. If the response lacks an |Upgrade| header field or the |Upgrade| header field contains a value that is not an ASCII case- insensitive match for the value "websocket", the client MUST _Fail the WebSocket Connection_. - 3. If the response lacks a |Connection| header field or the |Connection| header field doesn't contain a token that is an ASCII case-insensitive match for the value "Upgrade", the client MUST _Fail the WebSocket Connection_. - 4. If the response lacks a |Sec-WebSocket-Accept| header field or the |Sec-WebSocket-Accept| contains a value other than the base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket- @@ -193,7 +185,6 @@ private[http] object Handshake { E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and trailing whitespace, the client MUST _Fail the WebSocket Connection_. - 5. If the response includes a |Sec-WebSocket-Extensions| header field and this header field indicates the use of an extension that was not present in the client's handshake (the server has @@ -201,7 +192,6 @@ private[http] object Handshake { MUST _Fail the WebSocket Connection_. (The parsing of this header field to determine which extensions are requested is discussed in Section 9.1.) - 6. 
If the response includes a |Sec-WebSocket-Protocol| header field and this header field indicates the use of a subprotocol that was not present in the client's handshake (the server has indicated a diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala index df5b111..de330b4 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocket.scala @@ -26,10 +26,10 @@ private[http] object WebSocket { /** * A stack of all the higher WS layers between raw frames and the user API. */ - def stack(serverSide: Boolean, + def stack(serverSide: Boolean, maskingRandomFactory: () ⇒ Random, - closeTimeout: FiniteDuration = 3.seconds, - log: LoggingAdapter): BidiFlow[FrameEvent, Message, Message, FrameEvent, NotUsed] = + closeTimeout: FiniteDuration = 3.seconds, + log: LoggingAdapter): BidiFlow[FrameEvent, Message, Message, FrameEvent, NotUsed] = masking(serverSide, maskingRandomFactory) atop frameHandling(serverSide, closeTimeout, log) atop messageAPI(serverSide, closeTimeout) @@ -50,9 +50,9 @@ private[http] object WebSocket { * The layer that implements all low-level frame handling, like handling control frames, collecting messages * from frames, decoding text messages, close handling, etc. */ - def frameHandling(serverSide: Boolean = true, + def frameHandling(serverSide: Boolean = true, closeTimeout: FiniteDuration, - log: LoggingAdapter): BidiFlow[FrameEventOrError, FrameHandler.Output, FrameOutHandler.Input, FrameStart, NotUsed] = + log: LoggingAdapter): BidiFlow[FrameEventOrError, FrameHandler.Output, FrameOutHandler.Input, FrameStart, NotUsed] = BidiFlow.fromFlows( FrameHandler.create(server = serverSide), FrameOutHandler.create(serverSide, closeTimeout, log)) @@ -61,7 +61,7 @@ private[http] object WebSocket { /** * The layer that provides the high-level user facing API on top of frame handling. */ - def messageAPI(serverSide: Boolean, + def messageAPI(serverSide: Boolean, closeTimeout: FiniteDuration): BidiFlow[FrameHandler.Output, Message, Message, FrameOutHandler.Input, NotUsed] = { /* Completes this branch of the flow if no more messages are expected and converts close codes into errors */ class PrepareForUserHandler extends PushStage[MessagePart, MessagePart] { diff --git a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala index 2834c0c..f62187a 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/engine/ws/WebSocketClientBlueprint.scala @@ -33,9 +33,9 @@ object WebSocketClientBlueprint { /** * Returns a WebSocketClientLayer that can be materialized once. */ - def apply(request: WebSocketRequest, + def apply(request: WebSocketRequest, settings: ClientConnectionSettings, - log: LoggingAdapter): Http.WebSocketClientLayer = + log: LoggingAdapter): Http.WebSocketClientLayer = (simpleTls.atopMat(handshake(request, settings, log))(Keep.right) atop WebSocket.framing atop WebSocket.stack(serverSide = false, maskingRandomFactory = settings.websocketRandomFactory, log = log)).reversed @@ -44,9 +44,9 @@ object WebSocketClientBlueprint { * A bidi flow that injects and inspects the WS handshake and then goes out of the way. This BidiFlow * can only be materialized once. 
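 * As a sketch of the accept-value check quoted from RFC 6455 above (not the code in this change,
 * only the computation it validates): the expected `Sec-WebSocket-Accept` value is the
 * base64-encoded SHA-1 of the request key concatenated with the fixed RFC 6455 GUID.
 * {{{
 * import java.security.MessageDigest
 * import java.util.Base64
 *
 * def expectedAccept(secWebSocketKey: String): String = {
 *   // GUID mandated by RFC 6455, section 4.2.2
 *   val magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
 *   val sha1 = MessageDigest.getInstance("SHA-1").digest((secWebSocketKey + magic).getBytes("US-ASCII"))
 *   Base64.getEncoder.encodeToString(sha1)
 * }
 * }}}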
*/ - def handshake(request: WebSocketRequest, + def handshake(request: WebSocketRequest, settings: ClientConnectionSettings, - log: LoggingAdapter): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[WebSocketUpgradeResponse]] = { + log: LoggingAdapter): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[WebSocketUpgradeResponse]] = { import request._ val result = Promise[WebSocketUpgradeResponse]() diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala index 7552cc1..faa7119 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/CommonRules.scala @@ -172,8 +172,8 @@ private[parser] trait CommonRules { this: Parser with StringBuilding ⇒ def `challenge-or-credentials`: Rule2[String, Seq[(String, String)]] = rule { `auth-scheme` ~ ( - oneOrMore(`auth-param` ~> (_ -> _)).separatedBy(listSep) - | `token68` ~> (x ⇒ ("" -> x) :: Nil) + oneOrMore(`auth-param` ~> (_ → _)).separatedBy(listSep) + | `token68` ~> (x ⇒ ("" → x) :: Nil) | push(Nil)) } @@ -389,7 +389,7 @@ private[parser] trait CommonRules { this: Parser with StringBuilding ⇒ token ~ zeroOrMore(ws(';') ~ `transfer-parameter`) ~> (_.toMap) ~> (TransferEncodings.Extension(_, _)) } - def `transfer-parameter` = rule { token ~ ws('=') ~ word ~> (_ -> _) } + def `transfer-parameter` = rule { token ~ ws('=') ~ word ~> (_ → _) } // ****************************************************************************************** // helpers diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala index 41df467..6c11098 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentDispositionHeader.scala @@ -22,7 +22,7 @@ private[parser] trait ContentDispositionHeader { this: Parser with CommonRules w def `disp-ext-type` = rule { token } - def `disposition-parm` = rule { (`filename-parm` | `disp-ext-parm`) ~> (_ -> _) } + def `disposition-parm` = rule { (`filename-parm` | `disp-ext-parm`) ~> (_ → _) } def `filename-parm` = rule( ignoreCase("filename") ~ OWS ~ ws('=') ~ push("filename") ~ word diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala index b54990c..587f76a 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/ContentTypeHeader.scala @@ -15,11 +15,11 @@ private[parser] trait ContentTypeHeader { this: Parser with CommonRules with Com `media-type` ~ EOI ~> ((main, sub, params) ⇒ headers.`Content-Type`(contentType(main, sub, params))) } - @tailrec private def contentType(main: String, - sub: String, - params: Seq[(String, String)], - charset: Option[HttpCharset] = None, - builder: StringMapBuilder = null): ContentType = + @tailrec private def contentType(main: String, + sub: String, + params: Seq[(String, String)], + charset: Option[HttpCharset] = None, + builder: StringMapBuilder = null): ContentType = params match { case Nil ⇒ val parameters = if (builder eq null) Map.empty[String, String] else builder.result() diff --git 
a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala index dcc0bcc..b4414c8 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/HeaderParser.scala @@ -161,7 +161,7 @@ private[http] object HeaderParser { def uriParsingMode: Uri.ParsingMode def cookieParsingMode: ParserSettings.CookieParsingMode } - def Settings(uriParsingMode: Uri.ParsingMode = Uri.ParsingMode.Relaxed, + def Settings(uriParsingMode: Uri.ParsingMode = Uri.ParsingMode.Relaxed, cookieParsingMode: ParserSettings.CookieParsingMode = ParserSettings.CookieParsingMode.RFC6265): Settings = { val _uriParsingMode = uriParsingMode val _cookieParsingMode = cookieParsingMode diff --git a/akka-http-core/src/main/scala/akka/http/impl/model/parser/IpAddressParsing.scala b/akka-http-core/src/main/scala/akka/http/impl/model/parser/IpAddressParsing.scala index 64d3efc..d5b57bd 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/model/parser/IpAddressParsing.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/model/parser/IpAddressParsing.scala @@ -24,10 +24,10 @@ private[parser] trait IpAddressParsing { this: Parser ⇒ def `ip-v6-address`: Rule1[Array[Byte]] = { import CharUtils.{ hexValue ⇒ hv } var a: Array[Byte] = null - def zero(ix: Int) = rule { run(a(ix) = 0.toByte) } + def zero(ix: Int) = rule { run(a(ix)= 0.toByte) } def zero2(ix: Int) = rule { run { a(ix) = 0.toByte; a(ix + 1) = 0.toByte; } } - def h4(ix: Int) = rule { HEXDIG ~ run(a(ix) = hv(lastChar).toByte) } - def h8(ix: Int) = rule { HEXDIG ~ HEXDIG ~ run(a(ix) = (hv(charAt(-2)) * 16 + hv(lastChar)).toByte) } + def h4(ix: Int) = rule { HEXDIG ~ run(a(ix)= hv(lastChar).toByte) } + def h8(ix: Int) = rule { HEXDIG ~ HEXDIG ~ run(a(ix)= (hv(charAt(-2)) * 16 + hv(lastChar)).toByte) } def h16(ix: Int) = rule { h8(ix) ~ h8(ix + 1) | h4(ix) ~ h8(ix + 1) | zero(ix) ~ h8(ix + 1) | zero(ix) ~ h4(ix + 1) } def h16c(ix: Int) = rule { h16(ix) ~ ':' ~ !':' } def ch16o(ix: Int) = rule { optional(':' ~ !':') ~ (h16(ix) | zero2(ix)) } diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala index 1f8e4d4..fbe9427 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ClientConnectionSettingsImpl.scala @@ -18,13 +18,13 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } /** INTERNAL API */ private[akka] final case class ClientConnectionSettingsImpl( - userAgentHeader: Option[`User-Agent`], - connectingTimeout: FiniteDuration, - idleTimeout: Duration, - requestHeaderSizeHint: Int, + userAgentHeader: Option[`User-Agent`], + connectingTimeout: FiniteDuration, + idleTimeout: Duration, + requestHeaderSizeHint: Int, websocketRandomFactory: () ⇒ Random, - socketOptions: immutable.Seq[SocketOption], - parserSettings: ParserSettings) + socketOptions: immutable.Seq[SocketOption], + parserSettings: ParserSettings) extends akka.http.scaladsl.settings.ClientConnectionSettings { require(connectingTimeout >= Duration.Zero, "connectingTimeout must be >= 0") diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala index
dfa0a63..dc44110 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSettingsImpl.scala @@ -12,11 +12,11 @@ import scala.concurrent.duration.Duration /** INTERNAL API */ private[akka] final case class ConnectionPoolSettingsImpl( - val maxConnections: Int, - val maxRetries: Int, - val maxOpenRequests: Int, - val pipeliningLimit: Int, - val idleTimeout: Duration, + val maxConnections: Int, + val maxRetries: Int, + val maxOpenRequests: Int, + val pipeliningLimit: Int, + val idleTimeout: Duration, val connectionSettings: ClientConnectionSettings) extends ConnectionPoolSettings { diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala index 6eed86d..a7ad0c8 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ConnectionPoolSetup.scala @@ -10,6 +10,6 @@ import akka.http.scaladsl.settings.ConnectionPoolSettings /** INTERNAL API */ private[akka] final case class ConnectionPoolSetup( - settings: ConnectionPoolSettings, - connectionContext: ConnectionContext = ConnectionContext.noEncryption(), - log: LoggingAdapter) \ No newline at end of file + settings: ConnectionPoolSettings, + connectionContext: ConnectionContext = ConnectionContext.noEncryption(), + log: LoggingAdapter) \ No newline at end of file diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala index 642e9b1..cec220a 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ParserSettingsImpl.scala @@ -13,23 +13,23 @@ import akka.http.impl.util._ /** INTERNAL API */ private[akka] final case class ParserSettingsImpl( - maxUriLength: Int, - maxMethodLength: Int, - maxResponseReasonLength: Int, - maxHeaderNameLength: Int, - maxHeaderValueLength: Int, - maxHeaderCount: Int, - maxContentLength: Long, - maxChunkExtLength: Int, - maxChunkSize: Int, - uriParsingMode: Uri.ParsingMode, - cookieParsingMode: CookieParsingMode, - illegalHeaderWarnings: Boolean, - errorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity, - headerValueCacheLimits: Map[String, Int], + maxUriLength: Int, + maxMethodLength: Int, + maxResponseReasonLength: Int, + maxHeaderNameLength: Int, + maxHeaderValueLength: Int, + maxHeaderCount: Int, + maxContentLength: Long, + maxChunkExtLength: Int, + maxChunkSize: Int, + uriParsingMode: Uri.ParsingMode, + cookieParsingMode: CookieParsingMode, + illegalHeaderWarnings: Boolean, + errorLoggingVerbosity: ParserSettings.ErrorLoggingVerbosity, + headerValueCacheLimits: Map[String, Int], includeTlsSessionInfoHeader: Boolean, - customMethods: String ⇒ Option[HttpMethod], - customStatusCodes: Int ⇒ Option[StatusCode]) + customMethods: String ⇒ Option[HttpMethod], + customStatusCodes: Int ⇒ Option[StatusCode]) extends akka.http.scaladsl.settings.ParserSettings { require(maxUriLength > 0, "max-uri-length must be > 0") @@ -74,7 +74,7 @@ object ParserSettingsImpl extends SettingsCompanion[ParserSettingsImpl]("akka.ht CookieParsingMode(c getString "cookie-parsing-mode"), c getBoolean "illegal-header-warnings", ErrorLoggingVerbosity(c getString "error-logging-verbosity"), - 
cacheConfig.entrySet.asScala.map(kvp ⇒ kvp.getKey -> cacheConfig.getInt(kvp.getKey))(collection.breakOut), + cacheConfig.entrySet.asScala.map(kvp ⇒ kvp.getKey → cacheConfig.getInt(kvp.getKey))(collection.breakOut), c getBoolean "tls-session-info-header", noCustomMethods, noCustomStatusCodes) diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala index de55b0c..8abd0ea 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/RoutingSettingsImpl.scala @@ -9,13 +9,13 @@ import com.typesafe.config.Config /** INTERNAL API */ final case class RoutingSettingsImpl( - verboseErrorMessages: Boolean, - fileGetConditional: Boolean, - renderVanityFooter: Boolean, - rangeCountLimit: Int, + verboseErrorMessages: Boolean, + fileGetConditional: Boolean, + renderVanityFooter: Boolean, + rangeCountLimit: Int, rangeCoalescingThreshold: Long, - decodeMaxBytesPerChunk: Int, - fileIODispatcher: String) extends akka.http.scaladsl.settings.RoutingSettings { + decodeMaxBytesPerChunk: Int, + fileIODispatcher: String) extends akka.http.scaladsl.settings.RoutingSettings { override def productPrefix = "RoutingSettings" } diff --git a/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala b/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala index cac6fbf..105cc97 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/settings/ServerSettingsImpl.scala @@ -25,20 +25,20 @@ import akka.http.scaladsl.model.headers.{ Host, Server } /** INTERNAL API */ private[akka] final case class ServerSettingsImpl( - serverHeader: Option[Server], - timeouts: ServerSettings.Timeouts, - maxConnections: Int, - pipeliningLimit: Int, - remoteAddressHeader: Boolean, - rawRequestUriHeader: Boolean, + serverHeader: Option[Server], + timeouts: ServerSettings.Timeouts, + maxConnections: Int, + pipeliningLimit: Int, + remoteAddressHeader: Boolean, + rawRequestUriHeader: Boolean, transparentHeadRequests: Boolean, - verboseErrorMessages: Boolean, - responseHeaderSizeHint: Int, - backlog: Int, - socketOptions: immutable.Seq[SocketOption], - defaultHostHeader: Host, - websocketRandomFactory: () ⇒ Random, - parserSettings: ParserSettings) extends ServerSettings { + verboseErrorMessages: Boolean, + responseHeaderSizeHint: Int, + backlog: Int, + socketOptions: immutable.Seq[SocketOption], + defaultHostHeader: Host, + websocketRandomFactory: () ⇒ Random, + parserSettings: ParserSettings) extends ServerSettings { require(0 < maxConnections, "max-connections must be > 0") require(0 < pipeliningLimit && pipeliningLimit <= 1024, "pipelining-limit must be > 0 and <= 1024") @@ -53,9 +53,9 @@ object ServerSettingsImpl extends SettingsCompanion[ServerSettingsImpl]("akka.ht /** INTERNAL API */ final case class Timeouts( - idleTimeout: Duration, + idleTimeout: Duration, requestTimeout: Duration, - bindTimeout: FiniteDuration) extends ServerSettings.Timeouts { + bindTimeout: FiniteDuration) extends ServerSettings.Timeouts { require(idleTimeout > Duration.Zero, "idleTimeout must be infinite or > 0") require(requestTimeout > Duration.Zero, "requestTimeout must be infinite or > 0") require(bindTimeout > Duration.Zero, "bindTimeout must be > 0") diff --git 
a/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala b/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala index e125dce..df8b5f2 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/util/SettingsCompanion.scala @@ -54,6 +54,6 @@ private[http] object SettingsCompanion { val localHostName = try new InetSocketAddress(InetAddress.getLocalHost, 80).getHostString catch { case NonFatal(_) ⇒ "" } - ConfigFactory.parseMap(Map("akka.http.hostname" -> localHostName).asJava) + ConfigFactory.parseMap(Map("akka.http.hostname" → localHostName).asJava) } } \ No newline at end of file diff --git a/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala b/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala index cf21451..06745ba 100644 --- a/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala +++ b/akka-http-core/src/main/scala/akka/http/impl/util/StreamUtils.scala @@ -77,7 +77,7 @@ private[http] object StreamUtils { ctx.fail(cause) } } - source.transform(() ⇒ transformer) -> promise.future + source.transform(() ⇒ transformer) → promise.future } def sliceBytesTransformer(start: Long, length: Long): Flow[ByteString, ByteString, NotUsed] = { diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala b/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala index 985dcb4..e5061b1 100644 --- a/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala +++ b/akka-http-core/src/main/scala/akka/http/javadsl/Http.scala @@ -54,7 +54,7 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * Constructs a server layer stage using the given [[akka.http.javadsl.settings.ServerSettings]]. The returned [[BidiFlow]] isn't reusable and * can only be materialized once. */ - def serverLayer(settings: ServerSettings, + def serverLayer(settings: ServerSettings, materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = adaptServerLayer(delegate.serverLayer(settings.asScala)(materializer)) @@ -63,9 +63,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * can only be materialized once. The `remoteAddress`, if provided, will be added as a header to each [[HttpRequest]] * this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled. */ - def serverLayer(settings: ServerSettings, + def serverLayer(settings: ServerSettings, remoteAddress: Optional[InetSocketAddress], - materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = + materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = adaptServerLayer(delegate.serverLayer(settings.asScala, remoteAddress.asScala)(materializer)) /** @@ -73,10 +73,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * can only be materialized once. The remoteAddress, if provided, will be added as a header to each [[HttpRequest]] * this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled. 
*/ - def serverLayer(settings: ServerSettings, + def serverLayer(settings: ServerSettings, remoteAddress: Optional[InetSocketAddress], - log: LoggingAdapter, - materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = + log: LoggingAdapter, + materializer: Materializer): BidiFlow[HttpResponse, SslTlsOutbound, SslTlsInbound, HttpRequest, NotUsed] = adaptServerLayer(delegate.serverLayer(settings.asScala, remoteAddress.asScala, log)(materializer)) /** @@ -116,8 +116,8 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bind(connect: ConnectHttp, - settings: ServerSettings, + def bind(connect: ConnectHttp, + settings: ServerSettings, materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala new Source(delegate.bind(connect.host, connect.port, settings = settings.asScala, connectionContext = connectionContext)(materializer) @@ -140,9 +140,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bind(connect: ConnectHttp, - settings: ServerSettings, - log: LoggingAdapter, + def bind(connect: ConnectHttp, + settings: ServerSettings, + log: LoggingAdapter, materializer: Materializer): Source[IncomingConnection, CompletionStage[ServerBinding]] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala new Source(delegate.bind(connect.host, connect.port, connectionContext, settings.asScala, log)(materializer) @@ -160,8 +160,8 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _], - connect: ConnectHttp, + def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _], + connect: ConnectHttp, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala delegate.bindAndHandle(handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala, @@ -179,10 +179,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. 
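 * A minimal usage sketch (handler flow, actor system and materializer are assumed to be in scope;
 * this shows the shorter overload without explicit settings):
 * {{{
 * Http.get(system).bindAndHandle(handlerFlow, ConnectHttp.toHost("localhost", 8080), materializer)
 * }}}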
*/ - def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _], - connect: ConnectHttp, - settings: ServerSettings, - log: LoggingAdapter, + def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, _], + connect: ConnectHttp, + settings: ServerSettings, + log: LoggingAdapter, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala delegate.bindAndHandle(handler.asInstanceOf[Flow[sm.HttpRequest, sm.HttpResponse, _]].asScala, @@ -200,8 +200,8 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse], - connect: ConnectHttp, + def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse], + connect: ConnectHttp, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala delegate.bindAndHandleSync(handler.apply(_).asScala, connect.host, connect.port, connectionContext)(materializer) @@ -218,10 +218,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse], - connect: ConnectHttp, - settings: ServerSettings, - log: LoggingAdapter, + def bindAndHandleSync(handler: Function[HttpRequest, HttpResponse], + connect: ConnectHttp, + settings: ServerSettings, + log: LoggingAdapter, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala delegate.bindAndHandleSync(handler.apply(_).asScala, @@ -239,8 +239,8 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. */ - def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]], - connect: ConnectHttp, + def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]], + connect: ConnectHttp, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala delegate.bindAndHandleAsync(handler.apply(_).toScala, connect.host, connect.port, connectionContext)(materializer) @@ -257,9 +257,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * The server will be bound using HTTPS if the [[ConnectHttp]] object is configured with an [[HttpsConnectionContext]], * or the [[defaultServerHttpContext]] has been configured to be an [[HttpsConnectionContext]]. 
*/ - def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]], - connect: ConnectHttp, - settings: ServerSettings, + def bindAndHandleAsync(handler: Function[HttpRequest, CompletionStage[HttpResponse]], + connect: ConnectHttp, + settings: ServerSettings, parallelism: Int, log: LoggingAdapter, materializer: Materializer): CompletionStage[ServerBinding] = { val connectionContext = connect.effectiveConnectionContext(defaultServerHttpContext).asScala @@ -278,15 +278,15 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * Constructs a client layer stage using the given [[akka.http.javadsl.settings.ClientConnectionSettings]]. */ def clientLayer(hostHeader: headers.Host, - settings: ClientConnectionSettings): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] = + settings: ClientConnectionSettings): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] = adaptClientLayer(delegate.clientLayer(JavaMapping.toScala(hostHeader), settings.asScala)) /** * Constructs a client layer stage using the given [[ClientConnectionSettings]]. */ def clientLayer(hostHeader: headers.Host, - settings: ClientConnectionSettings, - log: LoggingAdapter): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] = + settings: ClientConnectionSettings, + log: LoggingAdapter): BidiFlow[HttpRequest, SslTlsOutbound, SslTlsInbound, HttpResponse, NotUsed] = adaptClientLayer(delegate.clientLayer(JavaMapping.toScala(hostHeader), settings.asScala, log)) /** @@ -314,10 +314,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * Creates a [[Flow]] representing a prospective HTTP client connection to the given endpoint. * Every materialization of the produced flow will attempt to establish a new outgoing connection. */ - def outgoingConnection(to: ConnectHttp, + def outgoingConnection(to: ConnectHttp, localAddress: Optional[InetSocketAddress], - settings: ClientConnectionSettings, - log: LoggingAdapter): Flow[HttpRequest, HttpResponse, CompletionStage[OutgoingConnection]] = + settings: ClientConnectionSettings, + log: LoggingAdapter): Flow[HttpRequest, HttpResponse, CompletionStage[OutgoingConnection]] = adaptOutgoingFlow { if (to.isHttps) delegate.outgoingConnectionHttps(to.host, to.port, to.effectiveConnectionContext(defaultClientHttpsContext).asInstanceOf[HttpsConnectionContext].asScala, localAddress.asScala, settings.asScala, log) @@ -364,9 +364,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The given [[ConnectionContext]] will be used for encryption on the connection. */ - def newHostConnectionPool[T](to: ConnectHttp, + def newHostConnectionPool[T](to: ConnectHttp, settings: ConnectionPoolSettings, - log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] = + log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] = adaptTupleFlow { to.effectiveHttpsConnectionContext(defaultClientHttpsContext) match { case https: HttpsConnectionContext ⇒ @@ -423,9 +423,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The given [[ConnectionContext]] will be used for encryption on the connection. 
*/ - def cachedHostConnectionPool[T](to: ConnectHttp, + def cachedHostConnectionPool[T](to: ConnectHttp, settings: ConnectionPoolSettings, - log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] = + log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], HostConnectionPool] = adaptTupleFlow(delegate.cachedHostConnectionPoolHttps[T](to.host, to.port, to.effectiveHttpsConnectionContext(defaultClientHttpsContext).asScala, settings.asScala, log)(materializer) .mapMaterializedValue(_.toJava)) @@ -459,9 +459,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * In order to allow for easy response-to-request association the flow takes in a custom, opaque context * object of type `T` from the application which is emitted together with the corresponding response. */ - def superPool[T](settings: ConnectionPoolSettings, + def superPool[T](settings: ConnectionPoolSettings, connectionContext: HttpsConnectionContext, - log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] = + log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] = adaptTupleFlow(delegate.superPool[T](connectionContext.asScala, settings.asScala, log)(materializer)) /** @@ -480,7 +480,7 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * object of type `T` from the application which is emitted together with the corresponding response. */ def superPool[T](settings: ConnectionPoolSettings, - log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] = + log: LoggingAdapter, materializer: Materializer): Flow[Pair[HttpRequest, T], Pair[Try[HttpResponse], T], NotUsed] = adaptTupleFlow(delegate.superPool[T](defaultClientHttpsContext.asScala, settings.asScala, log)(materializer)) /** @@ -516,10 +516,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * Note that the request must have either an absolute URI or a valid `Host` header, otherwise * the future will be completed with an error. */ - def singleRequest(request: HttpRequest, + def singleRequest(request: HttpRequest, connectionContext: HttpsConnectionContext, - settings: ConnectionPoolSettings, - log: LoggingAdapter, materializer: Materializer): CompletionStage[HttpResponse] = + settings: ConnectionPoolSettings, + log: LoggingAdapter, materializer: Materializer): CompletionStage[HttpResponse] = delegate.singleRequest(request.asScala, connectionContext.asScala, settings.asScala, log)(materializer).toJava /** @@ -536,7 +536,7 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The layer is not reusable and must only be materialized once. */ - def webSocketClientLayer(request: WebSocketRequest, + def webSocketClientLayer(request: WebSocketRequest, settings: ClientConnectionSettings): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] = adaptWsBidiFlow(delegate.webSocketClientLayer(request.asScala, settings.asScala)) @@ -546,9 +546,9 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The layer is not reusable and must only be materialized once. 
*/ - def webSocketClientLayer(request: WebSocketRequest, + def webSocketClientLayer(request: WebSocketRequest, settings: ClientConnectionSettings, - log: LoggingAdapter): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] = + log: LoggingAdapter): BidiFlow[Message, SslTlsOutbound, SslTlsInbound, Message, CompletionStage[WebSocketUpgradeResponse]] = adaptWsBidiFlow(delegate.webSocketClientLayer(request.asScala, settings.asScala, log)) /** @@ -566,11 +566,11 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The layer is not reusable and must only be materialized once. */ - def webSocketClientFlow(request: WebSocketRequest, + def webSocketClientFlow(request: WebSocketRequest, connectionContext: ConnectionContext, - localAddress: Optional[InetSocketAddress], - settings: ClientConnectionSettings, - log: LoggingAdapter): Flow[Message, Message, CompletionStage[WebSocketUpgradeResponse]] = + localAddress: Optional[InetSocketAddress], + settings: ClientConnectionSettings, + log: LoggingAdapter): Flow[Message, Message, CompletionStage[WebSocketUpgradeResponse]] = adaptWsFlow { delegate.webSocketClientFlow(request.asScala, connectionContext.asScala, localAddress.asScala, settings.asScala, log) } @@ -581,8 +581,8 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The [[defaultClientHttpsContext]] is used to configure TLS for the connection. */ - def singleWebSocketRequest[T](request: WebSocketRequest, - clientFlow: Flow[Message, Message, T], + def singleWebSocketRequest[T](request: WebSocketRequest, + clientFlow: Flow[Message, Message, T], materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] = adaptWsResultTuple { delegate.singleWebSocketRequest( @@ -596,10 +596,10 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * * The [[defaultClientHttpsContext]] is used to configure TLS for the connection. */ - def singleWebSocketRequest[T](request: WebSocketRequest, - clientFlow: Flow[Message, Message, T], + def singleWebSocketRequest[T](request: WebSocketRequest, + clientFlow: Flow[Message, Message, T], connectionContext: ConnectionContext, - materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] = + materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] = adaptWsResultTuple { delegate.singleWebSocketRequest( request.asScala, @@ -611,13 +611,13 @@ class Http(system: ExtendedActorSystem) extends akka.actor.Extension { * Runs a single WebSocket conversation given a Uri and a flow that represents the client side of the * WebSocket conversation. 
*/ - def singleWebSocketRequest[T](request: WebSocketRequest, - clientFlow: Flow[Message, Message, T], + def singleWebSocketRequest[T](request: WebSocketRequest, + clientFlow: Flow[Message, Message, T], connectionContext: ConnectionContext, - localAddress: Optional[InetSocketAddress], - settings: ClientConnectionSettings, - log: LoggingAdapter, - materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] = + localAddress: Optional[InetSocketAddress], + settings: ClientConnectionSettings, + log: LoggingAdapter, + materializer: Materializer): Pair[CompletionStage[WebSocketUpgradeResponse], T] = adaptWsResultTuple { delegate.singleWebSocketRequest( request.asScala, diff --git a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala index 30d0c59..5a13afc 100644 --- a/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala +++ b/akka-http-core/src/main/scala/akka/http/javadsl/settings/ParserSettings.scala @@ -60,12 +60,12 @@ abstract class ParserSettings private[akka] () extends BodyPartParser.Settings { @varargs def withCustomMethods(methods: HttpMethod*): ParserSettings = { - val map = methods.map(m ⇒ m.name -> m.asScala).toMap + val map = methods.map(m ⇒ m.name → m.asScala).toMap self.copy(customMethods = map.get) } @varargs def withCustomStatusCodes(codes: StatusCode*): ParserSettings = { - val map = codes.map(c ⇒ c.intValue -> c.asScala).toMap + val map = codes.map(c ⇒ c.intValue → c.asScala).toMap self.copy(customStatusCodes = map.get) } diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala index 4bfd7ec..4b1eb70 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/ConnectionContext.scala @@ -20,11 +20,11 @@ trait ConnectionContext extends akka.http.javadsl.ConnectionContext { object ConnectionContext { //#https-context-creation - def https(sslContext: SSLContext, + def https(sslContext: SSLContext, enabledCipherSuites: Option[immutable.Seq[String]] = None, - enabledProtocols: Option[immutable.Seq[String]] = None, - clientAuth: Option[TLSClientAuth] = None, - sslParameters: Option[SSLParameters] = None) = { + enabledProtocols: Option[immutable.Seq[String]] = None, + clientAuth: Option[TLSClientAuth] = None, + sslParameters: Option[SSLParameters] = None) = { new HttpsConnectionContext(sslContext, enabledCipherSuites, enabledProtocols, clientAuth, sslParameters) } //#https-context-creation @@ -33,11 +33,11 @@ object ConnectionContext { } final class HttpsConnectionContext( - val sslContext: SSLContext, + val sslContext: SSLContext, val enabledCipherSuites: Option[immutable.Seq[String]] = None, - val enabledProtocols: Option[immutable.Seq[String]] = None, - val clientAuth: Option[TLSClientAuth] = None, - val sslParameters: Option[SSLParameters] = None) + val enabledProtocols: Option[immutable.Seq[String]] = None, + val clientAuth: Option[TLSClientAuth] = None, + val sslParameters: Option[SSLParameters] = None) extends akka.http.javadsl.HttpsConnectionContext with ConnectionContext { def firstSession = NegotiateNewSession(enabledCipherSuites, enabledProtocols, clientAuth, sslParameters) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala index 4e71874..4332fff 100644 --- 
a/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/Http.scala @@ -76,8 +76,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def bind(interface: String, port: Int = DefaultPortForProtocol, connectionContext: ConnectionContext = defaultServerHttpContext, - settings: ServerSettings = ServerSettings(system), - log: LoggingAdapter = system.log)(implicit fm: Materializer): Source[IncomingConnection, Future[ServerBinding]] = { + settings: ServerSettings = ServerSettings(system), + log: LoggingAdapter = system.log)(implicit fm: Materializer): Source[IncomingConnection, Future[ServerBinding]] = { val effectivePort = if (port >= 0) port else connectionContext.defaultPort val tlsStage = sslTlsStage(connectionContext, Server) val connections: Source[Tcp.IncomingConnection, Future[Tcp.ServerBinding]] = @@ -102,11 +102,11 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * To configure additional settings for a server started using this method, * use the `akka.http.server` config section or pass in a [[ServerSettings]] explicitly. */ - def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, Any], + def bindAndHandle(handler: Flow[HttpRequest, HttpResponse, Any], interface: String, port: Int = DefaultPortForProtocol, connectionContext: ConnectionContext = defaultServerHttpContext, - settings: ServerSettings = ServerSettings(system), - log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = { + settings: ServerSettings = ServerSettings(system), + log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = { def handleOneConnection(incomingConnection: IncomingConnection): Future[Unit] = try incomingConnection.flow @@ -143,11 +143,11 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * To configure additional settings for a server started using this method, * use the `akka.http.server` config section or pass in a [[ServerSettings]] explicitly. */ - def bindAndHandleSync(handler: HttpRequest ⇒ HttpResponse, + def bindAndHandleSync(handler: HttpRequest ⇒ HttpResponse, interface: String, port: Int = DefaultPortForProtocol, connectionContext: ConnectionContext = defaultServerHttpContext, - settings: ServerSettings = ServerSettings(system), - log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = + settings: ServerSettings = ServerSettings(system), + log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = bindAndHandle(Flow[HttpRequest].map(handler), interface, port, connectionContext, settings, log) /** @@ -160,12 +160,12 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * To configure additional settings for a server started using this method, * use the `akka.http.server` config section or pass in a [[ServerSettings]] explicitly. 
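 * A minimal sketch (implicit ActorSystem and Materializer assumed; parallelism value illustrative):
 * {{{
 * Http().bindAndHandleAsync(
 *   req ⇒ Future.successful(HttpResponse()),
 *   interface = "localhost", port = 8080, parallelism = 4)
 * }}}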
*/ - def bindAndHandleAsync(handler: HttpRequest ⇒ Future[HttpResponse], + def bindAndHandleAsync(handler: HttpRequest ⇒ Future[HttpResponse], interface: String, port: Int = DefaultPortForProtocol, connectionContext: ConnectionContext = defaultServerHttpContext, - settings: ServerSettings = ServerSettings(system), - parallelism: Int = 1, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = + settings: ServerSettings = ServerSettings(system), + parallelism: Int = 1, + log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[ServerBinding] = bindAndHandle(Flow[HttpRequest].mapAsync(parallelism)(handler), interface, port, connectionContext, settings, log) type ServerLayer = Http.ServerLayer @@ -183,9 +183,9 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * can only be materialized once. The `remoteAddress`, if provided, will be added as a header to each [[HttpRequest]] * this layer produces if the `akka.http.server.remote-address-header` configuration option is enabled. */ - def serverLayer(settings: ServerSettings, + def serverLayer(settings: ServerSettings, remoteAddress: Option[InetSocketAddress] = None, - log: LoggingAdapter = system.log)(implicit mat: Materializer): ServerLayer = + log: LoggingAdapter = system.log)(implicit mat: Materializer): ServerLayer = HttpServerBluePrint(settings, remoteAddress, log) // ** CLIENT ** // @@ -199,8 +199,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def outgoingConnection(host: String, port: Int = 80, localAddress: Option[InetSocketAddress] = None, - settings: ClientConnectionSettings = ClientConnectionSettings(system), - log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = + settings: ClientConnectionSettings = ClientConnectionSettings(system), + log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = _outgoingConnection(host, port, localAddress, settings, ConnectionContext.noEncryption(), log) /** @@ -213,18 +213,18 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * use the `akka.http.client` config section or pass in a [[ClientConnectionSettings]] explicitly. 
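The connection-level outgoingConnection above yields one flow per HTTP connection; the TCP connection is opened when the flow is materialized. A sketch under the same assumptions (the host is a stand-in):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source }
import scala.concurrent.Future

object OutgoingConnectionSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // The materialized value carries the local/remote addresses once connected.
  val connectionFlow: Flow[HttpRequest, HttpResponse, Future[Http.OutgoingConnection]] =
    Http().outgoingConnection("akka.io") // port defaults to 80 per the signature above

  val responseFuture: Future[HttpResponse] =
    Source.single(HttpRequest(uri = "/")).via(connectionFlow).runWith(Sink.head)
}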
*/ def outgoingConnectionHttps(host: String, port: Int = 443, - connectionContext: HttpsConnectionContext = defaultClientHttpsContext, - localAddress: Option[InetSocketAddress] = None, - settings: ClientConnectionSettings = ClientConnectionSettings(system), - log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = + connectionContext: HttpsConnectionContext = defaultClientHttpsContext, + localAddress: Option[InetSocketAddress] = None, + settings: ClientConnectionSettings = ClientConnectionSettings(system), + log: LoggingAdapter = system.log): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = _outgoingConnection(host, port, localAddress, settings, connectionContext, log) - private def _outgoingConnection(host: String, - port: Int, - localAddress: Option[InetSocketAddress], - settings: ClientConnectionSettings, + private def _outgoingConnection(host: String, + port: Int, + localAddress: Option[InetSocketAddress], + settings: ClientConnectionSettings, connectionContext: ConnectionContext, - log: LoggingAdapter): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = { + log: LoggingAdapter): Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]] = { val hostHeader = if (port == connectionContext.defaultPort) Host(host) else Host(host, port) val layer = clientLayer(hostHeader, settings, log) layer.joinMat(_outgoingTlsConnectionLayer(host, port, localAddress, settings, connectionContext, log))(Keep.right) @@ -233,7 +233,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte private def _outgoingTlsConnectionLayer(host: String, port: Int, localAddress: Option[InetSocketAddress], settings: ClientConnectionSettings, connectionContext: ConnectionContext, log: LoggingAdapter): Flow[SslTlsOutbound, SslTlsInbound, Future[OutgoingConnection]] = { - val tlsStage = sslTlsStage(connectionContext, Client, Some(host -> port)) + val tlsStage = sslTlsStage(connectionContext, Client, Some(host → port)) val transportFlow = Tcp().outgoingConnection(new InetSocketAddress(host, port), localAddress, settings.socketOptions, halfClose = true, settings.connectingTimeout, settings.idleTimeout) @@ -256,8 +256,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * Constructs a [[ClientLayer]] stage using the given [[ClientConnectionSettings]]. 
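The private _outgoingConnection above renders the Host header with an explicit port only when the port differs from the connection context's default. A standalone sketch of just that rule (hostHeaderFor is a hypothetical helper name):

import akka.http.scaladsl.model.headers.Host

object HostHeaderRuleSketch extends App {
  // hostHeaderFor names the rule that is inlined in _outgoingConnection above.
  def hostHeaderFor(host: String, port: Int, defaultPort: Int): Host =
    if (port == defaultPort) Host(host) else Host(host, port)

  println(hostHeaderFor("example.com", 443, 443))  // Host: example.com
  println(hostHeaderFor("example.com", 8443, 443)) // Host: example.com:8443
}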
*/ def clientLayer(hostHeader: Host, - settings: ClientConnectionSettings, - log: LoggingAdapter = system.log): ClientLayer = + settings: ClientConnectionSettings, + log: LoggingAdapter = system.log): ClientLayer = OutgoingConnectionBlueprint(hostHeader, settings, log) // ** CONNECTION POOL ** // @@ -281,7 +281,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def newHostConnectionPool[T](host: String, port: Int = 80, settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { + log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { val cps = ConnectionPoolSetup(settings, ConnectionContext.noEncryption(), log) newHostConnectionPool(HostConnectionPoolSetup(host, port, cps)) } @@ -297,8 +297,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def newHostConnectionPoolHttps[T](host: String, port: Int = 443, connectionContext: HttpsConnectionContext = defaultClientHttpsContext, - settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { + settings: ConnectionPoolSettings = defaultConnectionPoolSettings, + log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { val cps = ConnectionPoolSetup(settings, connectionContext, log) newHostConnectionPool(HostConnectionPoolSetup(host, port, cps)) } @@ -320,7 +320,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * object of type `T` from the application which is emitted together with the corresponding response. 
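As the pool scaladoc above explains, responses may come back in a different order than their requests, so the flow carries a caller-chosen context value T. A sketch using an Int id as the context (host illustrative):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Sink, Source }
import scala.concurrent.Future
import scala.util.Try

object HostPoolSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  val poolFlow: Flow[(HttpRequest, Int), (Try[HttpResponse], Int), Http.HostConnectionPool] =
    Http().newHostConnectionPool[Int]("akka.io") // port 80 and default pool settings

  // Each request travels with its id; the id returns with the (possibly reordered) response.
  val responses: Future[Seq[(Try[HttpResponse], Int)]] =
    Source(List(HttpRequest(uri = "/a") → 1, HttpRequest(uri = "/b") → 2))
      .via(poolFlow)
      .runWith(Sink.seq)
}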
*/ private[akka] def newHostConnectionPool[T](setup: HostConnectionPoolSetup)( - implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { + implicit + fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { val gatewayFuture = FastFuture.successful(new PoolGateway(setup, Promise())) gatewayClientFlow(setup, gatewayFuture) } @@ -347,7 +348,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def cachedHostConnectionPool[T](host: String, port: Int = 80, settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { + log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { val cps = ConnectionPoolSetup(settings, ConnectionContext.noEncryption(), log) val setup = HostConnectionPoolSetup(host, port, cps) cachedHostConnectionPool(setup) @@ -364,8 +365,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte */ def cachedHostConnectionPoolHttps[T](host: String, port: Int = 443, connectionContext: HttpsConnectionContext = defaultClientHttpsContext, - settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { + settings: ConnectionPoolSettings = defaultConnectionPoolSettings, + log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = { val cps = ConnectionPoolSetup(settings, connectionContext, log) val setup = HostConnectionPoolSetup(host, port, cps) cachedHostConnectionPool(setup) @@ -389,7 +390,8 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * object of type `T` from the application which is emitted together with the corresponding response. */ private def cachedHostConnectionPool[T](setup: HostConnectionPoolSetup)( - implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = + implicit + fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = gatewayClientFlow(setup, cachedGateway(setup)) /** @@ -410,9 +412,9 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * use the `akka.http.host-connection-pool` config section or pass in a [[ConnectionPoolSettings]] explicitly. 
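cachedHostConnectionPool above differs from the new* variants only in pool identity: flows created for an identical HostConnectionPoolSetup share a single pool per ActorSystem (backed by the hostPoolCache below). A brief sketch:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer

object CachedPoolSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Identical host/port/settings ⇒ identical HostConnectionPoolSetup ⇒ one shared pool.
  val flowA = Http().cachedHostConnectionPool[Long]("akka.io", 80)
  val flowB = Http().cachedHostConnectionPool[Long]("akka.io", 80)
}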
*/ def superPool[T](connectionContext: HttpsConnectionContext = defaultClientHttpsContext, - settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = - clientFlow[T](settings) { request ⇒ request -> cachedGateway(request, settings, connectionContext, log) } + settings: ConnectionPoolSettings = defaultConnectionPoolSettings, + log: LoggingAdapter = system.log)(implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = + clientFlow[T](settings) { request ⇒ request → cachedGateway(request, settings, connectionContext, log) } /** * Fires a single [[HttpRequest]] across the (cached) host connection pool for the request's @@ -423,10 +425,10 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * * Note that the request must have an absolute URI, otherwise the future will be completed with an error. */ - def singleRequest(request: HttpRequest, + def singleRequest(request: HttpRequest, connectionContext: HttpsConnectionContext = defaultClientHttpsContext, - settings: ConnectionPoolSettings = defaultConnectionPoolSettings, - log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[HttpResponse] = + settings: ConnectionPoolSettings = defaultConnectionPoolSettings, + log: LoggingAdapter = system.log)(implicit fm: Materializer): Future[HttpResponse] = try { val gatewayFuture = cachedGateway(request, settings, connectionContext, log) gatewayFuture.flatMap(_(request))(fm.executionContext) @@ -440,9 +442,9 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * * The layer is not reusable and must only be materialized once. */ - def webSocketClientLayer(request: WebSocketRequest, + def webSocketClientLayer(request: WebSocketRequest, settings: ClientConnectionSettings = ClientConnectionSettings(system), - log: LoggingAdapter = system.log): Http.WebSocketClientLayer = + log: LoggingAdapter = system.log): Http.WebSocketClientLayer = WebSocketClientBlueprint(request, settings, log) /** @@ -450,11 +452,11 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * * The layer is not reusable and must only be materialized once. */ - def webSocketClientFlow(request: WebSocketRequest, - connectionContext: ConnectionContext = defaultClientHttpsContext, - localAddress: Option[InetSocketAddress] = None, - settings: ClientConnectionSettings = ClientConnectionSettings(system), - log: LoggingAdapter = system.log): Flow[Message, Message, Future[WebSocketUpgradeResponse]] = { + def webSocketClientFlow(request: WebSocketRequest, + connectionContext: ConnectionContext = defaultClientHttpsContext, + localAddress: Option[InetSocketAddress] = None, + settings: ClientConnectionSettings = ClientConnectionSettings(system), + log: LoggingAdapter = system.log): Flow[Message, Message, Future[WebSocketUpgradeResponse]] = { import request.uri require(uri.isAbsolute, s"WebSocket request URI must be absolute but was '$uri'") @@ -477,12 +479,12 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte * Runs a single WebSocket conversation given a Uri and a flow that represents the client side of the * WebSocket conversation. 
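singleRequest above is the request-level counterpart: it picks a cached pool from the request's scheme and authority, which is why the URI must be absolute. A sketch:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import scala.concurrent.Future

object SingleRequestSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher // ExecutionContext for the callback below

  // A relative URI would fail the returned future, per the note above.
  val response: Future[HttpResponse] = Http().singleRequest(HttpRequest(uri = "http://akka.io/"))
  response.foreach(r ⇒ println(r.status))
}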
*/ - def singleWebSocketRequest[T](request: WebSocketRequest, - clientFlow: Flow[Message, Message, T], - connectionContext: ConnectionContext = defaultClientHttpsContext, - localAddress: Option[InetSocketAddress] = None, - settings: ClientConnectionSettings = ClientConnectionSettings(system), - log: LoggingAdapter = system.log)(implicit mat: Materializer): (Future[WebSocketUpgradeResponse], T) = + def singleWebSocketRequest[T](request: WebSocketRequest, + clientFlow: Flow[Message, Message, T], + connectionContext: ConnectionContext = defaultClientHttpsContext, + localAddress: Option[InetSocketAddress] = None, + settings: ClientConnectionSettings = ClientConnectionSettings(system), + log: LoggingAdapter = system.log)(implicit mat: Materializer): (Future[WebSocketUpgradeResponse], T) = webSocketClientFlow(request, connectionContext, localAddress, settings, log) .joinMat(clientFlow)(Keep.both).run() @@ -548,7 +550,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte // every ActorSystem maintains its own connection pools private[http] val hostPoolCache = new ConcurrentHashMap[HostConnectionPoolSetup, Future[PoolGateway]] - private def cachedGateway(request: HttpRequest, + private def cachedGateway(request: HttpRequest, settings: ConnectionPoolSettings, connectionContext: ConnectionContext, log: LoggingAdapter)(implicit fm: Materializer): Future[PoolGateway] = if (request.uri.scheme.nonEmpty && request.uri.authority.nonEmpty) { @@ -586,14 +588,16 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte } } - private def gatewayClientFlow[T](hcps: HostConnectionPoolSetup, + private def gatewayClientFlow[T](hcps: HostConnectionPoolSetup, gatewayFuture: Future[PoolGateway])( - implicit fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = - clientFlow[T](hcps.setup.settings)(_ -> gatewayFuture) + implicit + fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), HostConnectionPool] = + clientFlow[T](hcps.setup.settings)(_ → gatewayFuture) .mapMaterializedValue(_ ⇒ HostConnectionPool(hcps)(gatewayFuture)) private def clientFlow[T](settings: ConnectionPoolSettings)(f: HttpRequest ⇒ (HttpRequest, Future[PoolGateway]))( - implicit system: ActorSystem, fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = { + implicit + system: ActorSystem, fm: Materializer): Flow[(HttpRequest, T), (Try[HttpResponse], T), NotUsed] = { // a connection pool can never have more than pipeliningLimit * maxConnections requests in flight at any point val parallelism = settings.pipeliningLimit * settings.maxConnections Flow[(HttpRequest, T)].mapAsyncUnordered(parallelism) { @@ -602,7 +606,7 @@ class HttpExt(private val config: Config)(implicit val system: ActorSystem) exte val result = Promise[(Try[HttpResponse], T)]() // TODO: simplify to `transformWith` when on Scala 2.12 gatewayFuture .flatMap(_(effectiveRequest))(fm.executionContext) - .onComplete(responseTry ⇒ result.success(responseTry -> userContext))(fm.executionContext) + .onComplete(responseTry ⇒ result.success(responseTry → userContext))(fm.executionContext) result.future } } @@ -684,9 +688,9 @@ object Http extends ExtensionId[HttpExt] with ExtensionIdProvider { * Represents one accepted incoming HTTP connection. 
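singleWebSocketRequest above joins the upgrade flow with a caller-supplied message flow and returns the upgrade response alongside that flow's materialized value. A sketch whose endpoint is a placeholder:

import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{ Message, TextMessage, WebSocketRequest }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source }
import scala.concurrent.Future

object SingleWebSocketSketch extends App {
  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Print incoming messages, send one text frame, keep the sink's completion future.
  val clientFlow: Flow[Message, Message, Future[Done]] =
    Flow.fromSinkAndSourceMat(
      Sink.foreach[Message](println),
      Source.single(TextMessage("hello")))(Keep.left)

  // ws://echo.example.org is a placeholder endpoint.
  val (upgradeResponse, closed) =
    Http().singleWebSocketRequest(WebSocketRequest("ws://echo.example.org"), clientFlow)
}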
*/ final case class IncomingConnection( - localAddress: InetSocketAddress, + localAddress: InetSocketAddress, remoteAddress: InetSocketAddress, - flow: Flow[HttpResponse, HttpRequest, NotUsed]) { + flow: Flow[HttpResponse, HttpRequest, NotUsed]) { /** * Handles the connection with the given flow, which is materialized exactly once diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala index 1b58ea5..c5e95a5 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/DateTime.scala @@ -11,14 +11,14 @@ import akka.http.impl.util._ * Does not support TimeZones, all DateTime values are always GMT based. * Note that this implementation discards milliseconds (i.e. rounds down to full seconds). */ -final case class DateTime private (year: Int, // the year - month: Int, // the month of the year. January is 1. - day: Int, // the day of the month. The first day is 1. - hour: Int, // the hour of the day. The first hour is 0. - minute: Int, // the minute of the hour. The first minute is 0. - second: Int, // the second of the minute. The first second is 0. - weekday: Int, // the day of the week. Sunday is 0. - clicks: Long, // milliseconds since January 1, 1970, 00:00:00 GMT +final case class DateTime private (year: Int, // the year + month: Int, // the month of the year. January is 1. + day: Int, // the day of the month. The first day is 1. + hour: Int, // the hour of the day. The first hour is 0. + minute: Int, // the minute of the hour. The first minute is 0. + second: Int, // the second of the minute. The first second is 0. + weekday: Int, // the day of the week. Sunday is 0. + clicks: Long, // milliseconds since January 1, 1970, 00:00:00 GMT isLeapYear: Boolean) extends akka.http.javadsl.model.DateTime with Ordered[DateTime] with Renderable { /** * The day of the week as a 3 letter abbreviation: diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala index 8ade900..f5aef44 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpEntity.scala @@ -8,7 +8,7 @@ import java.util.OptionalLong import language.implicitConversions import java.io.File -import java.lang.{ Iterable ⇒ JIterable} +import java.lang.{ Iterable ⇒ JIterable } import scala.util.control.NonFatal import scala.concurrent.Future import scala.concurrent.duration._ @@ -264,9 +264,9 @@ object HttpEntity { /** * The model for the entity of a "regular" unchunked HTTP message with a known non-zero length. 
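A small sketch of the DateTime model documented above (GMT-only, second precision, Sunday = 0); the sample date is the stable one the renderer specs below pin their output to:

import akka.http.scaladsl.model.DateTime

object DateTimeSketch extends App {
  val dt = DateTime(2011, 8, 25, 9, 10, 29)
  println(dt.toRfc1123DateTimeString) // "Thu, 25 Aug 2011 09:10:29 GMT"
  println(dt.weekday)                 // 4 — Thursday, since Sunday is 0
  println(dt.clicks)                  // milliseconds since the epoch, full seconds only
}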
*/ - final case class Default(contentType: ContentType, + final case class Default(contentType: ContentType, contentLength: Long, - data: Source[ByteString, Any]) + data: Source[ByteString, Any]) extends jm.HttpEntity.Default with UniversalEntity { require(contentLength > 0, "contentLength must be positive (use `HttpEntity.empty(contentType)` for empty entities)") def isKnownEmpty = false @@ -515,18 +515,18 @@ */ private[http] def captureTermination[T <: HttpEntity](entity: T): (T, Future[Unit]) = entity match { - case x: HttpEntity.Strict ⇒ x.asInstanceOf[T] -> FastFuture.successful(()) + case x: HttpEntity.Strict ⇒ x.asInstanceOf[T] → FastFuture.successful(()) case x: HttpEntity.Default ⇒ val (newData, whenCompleted) = StreamUtils.captureTermination(x.data) - x.copy(data = newData).asInstanceOf[T] -> whenCompleted + x.copy(data = newData).asInstanceOf[T] → whenCompleted case x: HttpEntity.Chunked ⇒ val (newChunks, whenCompleted) = StreamUtils.captureTermination(x.chunks) - x.copy(chunks = newChunks).asInstanceOf[T] -> whenCompleted + x.copy(chunks = newChunks).asInstanceOf[T] → whenCompleted case x: HttpEntity.CloseDelimited ⇒ val (newData, whenCompleted) = StreamUtils.captureTermination(x.data) - x.copy(data = newData).asInstanceOf[T] -> whenCompleted + x.copy(data = newData).asInstanceOf[T] → whenCompleted case x: HttpEntity.IndefiniteLength ⇒ val (newData, whenCompleted) = StreamUtils.captureTermination(x.data) - x.copy(data = newData).asInstanceOf[T] -> whenCompleted + x.copy(data = newData).asInstanceOf[T] → whenCompleted } } diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala index 627e5a7..08c2a62 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMessage.scala @@ -134,11 +134,11 @@ object HttpMessage { /** * The immutable HTTP request model. */ -final case class HttpRequest(method: HttpMethod = HttpMethods.GET, - uri: Uri = Uri./, - headers: immutable.Seq[HttpHeader] = Nil, - entity: RequestEntity = HttpEntity.Empty, - protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) extends jm.HttpRequest with HttpMessage { +final case class HttpRequest(method: HttpMethod = HttpMethods.GET, + uri: Uri = Uri./, + headers: immutable.Seq[HttpHeader] = Nil, + entity: RequestEntity = HttpEntity.Empty, + protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) extends jm.HttpRequest with HttpMessage { HttpRequest.verifyUri(uri) require(entity.isKnownEmpty || method.isEntityAccepted, s"Requests with method '${method.value}' must have an empty entity") require(protocol != HttpProtocols.`HTTP/1.0` || !entity.isInstanceOf[HttpEntity.Chunked], @@ -244,10 +244,10 @@ object HttpRequest { /** * The immutable HTTP response model. 
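Given the defaults visible above, messages can be constructed very sparsely; a sketch:

import akka.http.scaladsl.model._

object MessageModelSketch extends App {
  // method defaults to GET, protocol to HTTP/1.1, headers to Nil, entity to Empty.
  val get = HttpRequest(uri = "/index.html")

  // The require above rejects entities on methods that do not accept them, so use POST here.
  val post = HttpRequest(HttpMethods.POST, uri = "/submit", entity = HttpEntity("payload"))

  val ok = HttpResponse(entity = "done") // status defaults to 200 OK
}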
*/ -final case class HttpResponse(status: StatusCode = StatusCodes.OK, - headers: immutable.Seq[HttpHeader] = Nil, - entity: ResponseEntity = HttpEntity.Empty, - protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) extends jm.HttpResponse with HttpMessage { +final case class HttpResponse(status: StatusCode = StatusCodes.OK, + headers: immutable.Seq[HttpHeader] = Nil, + entity: ResponseEntity = HttpEntity.Empty, + protocol: HttpProtocol = HttpProtocols.`HTTP/1.1`) extends jm.HttpResponse with HttpMessage { require(entity.isKnownEmpty || status.allowsEntity, "Responses with this status code must have an empty entity") require(protocol == HttpProtocols.`HTTP/1.1` || !entity.isInstanceOf[HttpEntity.Chunked], "HTTP/1.0 responses must not have a chunked entity") diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala index 2184e1d..52bdf3f 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/HttpMethod.scala @@ -29,9 +29,9 @@ object RequestEntityAcceptance { * @param isIdempotent true if requests can be safely (& automatically) repeated * @param requestEntityAcceptance Expected if meaning of request entities is properly defined */ -final case class HttpMethod private[http] (override val value: String, - isSafe: Boolean, - isIdempotent: Boolean, +final case class HttpMethod private[http] (override val value: String, + isSafe: Boolean, + isIdempotent: Boolean, requestEntityAcceptance: RequestEntityAcceptance) extends jm.HttpMethod with SingletonValueRenderable { override def isEntityAccepted: Boolean = requestEntityAcceptance.isEntityAccepted override def toString: String = s"HttpMethod($value)" diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala index f4119ab..ed8c385 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaRange.scala @@ -50,8 +50,8 @@ sealed abstract class MediaRange extends jm.MediaRange with Renderable with With object MediaRange { private[http] def splitOffQValue(params: Map[String, String], defaultQ: Float = 1.0f): (Map[String, String], Float) = params.get("q") match { - case Some(x) ⇒ (params - "q") -> (try x.toFloat catch { case _: NumberFormatException ⇒ 1.0f }) - case None ⇒ params -> defaultQ + case Some(x) ⇒ (params - "q") → (try x.toFloat catch { case _: NumberFormatException ⇒ 1.0f }) + case None ⇒ params → defaultQ } private final case class Custom(mainType: String, params: Map[String, String], qValue: Float) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala index 322a766..a24a3bf 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/MediaType.scala @@ -126,8 +126,8 @@ object MediaType { } def customWithFixedCharset(mainType: String, subType: String, charset: HttpCharset, fileExtensions: List[String] = Nil, - params: Map[String, String] = Map.empty, - allowArbitrarySubtypes: Boolean = false): WithFixedCharset = { + params: Map[String, String] = Map.empty, + allowArbitrarySubtypes: Boolean = false): WithFixedCharset = { require(mainType != "multipart", "Cannot create a 
MediaType.Multipart here, use `customMultipart` instead!") require(allowArbitrarySubtypes || subType != "*", "Cannot create a MediaRange here, use `MediaRange.custom` instead!") val _params = params @@ -143,8 +143,8 @@ object MediaType { } def customWithOpenCharset(mainType: String, subType: String, fileExtensions: List[String] = Nil, - params: Map[String, String] = Map.empty, - allowArbitrarySubtypes: Boolean = false): WithOpenCharset = { + params: Map[String, String] = Map.empty, + allowArbitrarySubtypes: Boolean = false): WithOpenCharset = { require(mainType != "multipart", "Cannot create a MediaType.Multipart here, use `customMultipart` instead!") require(allowArbitrarySubtypes || subType != "*", "Cannot create a MediaRange here, use `MediaRange.custom` instead!") val _params = params @@ -274,7 +274,7 @@ object MediaTypes extends ObjectRegistry[(String, String), MediaType] { private def register[T <: MediaType](mediaType: T): T = { registerFileExtensions(mediaType) - register(mediaType.mainType.toRootLowerCase -> mediaType.subType.toRootLowerCase, mediaType) + register(mediaType.mainType.toRootLowerCase → mediaType.subType.toRootLowerCase, mediaType) } import MediaType._ diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala index 0d4b2dd..853c504 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Multipart.scala @@ -56,8 +56,8 @@ sealed trait Multipart extends jm.Multipart { /** * Creates a [[MessageEntity]] from this multipart object. */ - def toEntity(charset: HttpCharset = HttpCharsets.`UTF-8`, - boundary: String = BodyPartRenderer.randomBoundary())(implicit log: LoggingAdapter = NoLogging): MessageEntity = { + def toEntity(charset: HttpCharset = HttpCharsets.`UTF-8`, + boundary: String = BodyPartRenderer.randomBoundary())(implicit log: LoggingAdapter = NoLogging): MessageEntity = { val chunks = parts .transform(() ⇒ BodyPartRenderer.streamed(boundary, charset.nioCharset, partHeadersSizeHint = 128, log)) @@ -224,7 +224,7 @@ object Multipart { } def unapply(value: Multipart.General): Option[(MediaType.Multipart, Source[Multipart.General.BodyPart, Any])] = - Some(value.mediaType -> value.parts) + Some(value.mediaType → value.parts) /** * Strict [[General]] multipart content. @@ -284,7 +284,7 @@ object Multipart { override def toString = s"General.BodyPart($entity, $headers)" } - def unapply(value: BodyPart): Option[(BodyPartEntity, immutable.Seq[HttpHeader])] = Some(value.entity -> value.headers) + def unapply(value: BodyPart): Option[(BodyPartEntity, immutable.Seq[HttpHeader])] = Some(value.entity → value.headers) /** * Strict [[General.BodyPart]]. @@ -419,8 +419,8 @@ object Multipart { } object BodyPart { def apply(_name: String, _entity: BodyPartEntity, - _additionalDispositionParams: Map[String, String] = Map.empty, - _additionalHeaders: immutable.Seq[HttpHeader] = Nil): Multipart.FormData.BodyPart = + _additionalDispositionParams: Map[String, String] = Map.empty, + _additionalHeaders: immutable.Seq[HttpHeader] = Nil): Multipart.FormData.BodyPart = new Multipart.FormData.BodyPart { def name = _name def additionalDispositionParams = _additionalDispositionParams @@ -433,7 +433,7 @@ object Multipart { * Creates a BodyPart backed by a File that will be streamed using a FileSource. 
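A sketch of building form data from the factories above, using the strict body-part model (the file-backed fromFile just described follows the same shape); the field name and value are illustrative:

import akka.http.scaladsl.model._

object FormDataSketch extends App {
  // Strict part, per the FormData.BodyPart.Strict case class below.
  val field = Multipart.FormData.BodyPart.Strict("username", HttpEntity("alice"))

  // charset and boundary fall back to their defaults; the implicit log defaults to NoLogging.
  val entity: MessageEntity = Multipart.FormData(field).toEntity()
}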
*/ def fromFile(name: String, contentType: ContentType, file: File, chunkSize: Int = -1): BodyPart = - BodyPart(name, HttpEntity(contentType, file, chunkSize), Map("filename" -> file.getName)) + BodyPart(name, HttpEntity(contentType, file, chunkSize), Map("filename" → file.getName)) def unapply(value: BodyPart): Option[(String, BodyPartEntity, Map[String, String], immutable.Seq[HttpHeader])] = Some((value.name, value.entity, value.additionalDispositionParams, value.additionalHeaders)) @@ -442,8 +442,8 @@ object Multipart { * Strict [[FormData.BodyPart]]. */ case class Strict(name: String, entity: HttpEntity.Strict, - additionalDispositionParams: Map[String, String] = Map.empty, - additionalHeaders: immutable.Seq[HttpHeader] = Nil) + additionalDispositionParams: Map[String, String] = Map.empty, + additionalHeaders: immutable.Seq[HttpHeader] = Nil) extends Multipart.FormData.BodyPart with Multipart.BodyPart.Strict with jm.Multipart.FormData.BodyPart.Strict { override def toStrict(timeout: FiniteDuration)(implicit fm: Materializer): Future[Multipart.FormData.BodyPart.Strict] = FastFuture.successful(this) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala index 7afdab3..827b18a 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/Uri.scala @@ -595,9 +595,9 @@ object Uri { } private val defaultPorts: Map[String, Int] = - Map("ftp" -> 21, "ssh" -> 22, "telnet" -> 23, "smtp" -> 25, "domain" -> 53, "tftp" -> 69, "http" -> 80, "ws" -> 80, - "pop3" -> 110, "nntp" -> 119, "imap" -> 143, "snmp" -> 161, "ldap" -> 389, "https" -> 443, "wss" -> 443, "imaps" -> 993, - "nfs" -> 2049).withDefaultValue(-1) + Map("ftp" → 21, "ssh" → 22, "telnet" → 23, "smtp" → 25, "domain" → 53, "tftp" → 69, "http" → 80, "ws" → 80, + "pop3" → 110, "nntp" → 119, "imap" → 143, "snmp" → 161, "ldap" → 389, "https" → 443, "wss" → 443, "imaps" → 993, + "nfs" → 2049).withDefaultValue(-1) sealed trait ParsingMode extends akka.http.javadsl.model.Uri.ParsingMode object ParsingMode { diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala index ec5d07e..5e2507e 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/HttpCookie.scala @@ -17,7 +17,7 @@ import scala.compat.java8.OptionConverters._ // see http://tools.ietf.org/html/rfc6265 // sealed abstract to prevent generation of default apply method in companion sealed abstract case class HttpCookiePair private ( - name: String, + name: String, value: String) extends jm.headers.HttpCookiePair with ToStringRenderable { def render[R <: Rendering](r: R): r.type = r ~~ name ~~ '=' ~~ value @@ -50,15 +50,15 @@ object HttpCookiePair { // see http://tools.ietf.org/html/rfc6265 final case class HttpCookie( - name: String, - value: String, - expires: Option[DateTime] = None, - maxAge: Option[Long] = None, - domain: Option[String] = None, - path: Option[String] = None, - secure: Boolean = false, - httpOnly: Boolean = false, - extension: Option[String] = None) extends jm.headers.HttpCookie with ToStringRenderable { + name: String, + value: String, + expires: Option[DateTime] = None, + maxAge: Option[Long] = None, + domain: Option[String] = None, + path: Option[String] = None, + secure: Boolean = 
false, + httpOnly: Boolean = false, + extension: Option[String] = None) extends jm.headers.HttpCookie with ToStringRenderable { /** Returns the name/value pair for this cookie, to be used in [[Cookie]] headers. */ def pair: HttpCookiePair = HttpCookiePair(name, value) @@ -111,14 +111,14 @@ final case class HttpCookie( } object HttpCookie { - def fromPair(pair: HttpCookiePair, - expires: Option[DateTime] = None, - maxAge: Option[Long] = None, - domain: Option[String] = None, - path: Option[String] = None, - secure: Boolean = false, - httpOnly: Boolean = false, - extension: Option[String] = None): HttpCookie = + def fromPair(pair: HttpCookiePair, + expires: Option[DateTime] = None, + maxAge: Option[Long] = None, + domain: Option[String] = None, + path: Option[String] = None, + secure: Boolean = false, + httpOnly: Boolean = false, + extension: Option[String] = None): HttpCookie = HttpCookie(pair.name, pair.value, expires, maxAge, domain, path, secure, httpOnly, extension) import akka.http.impl.model.parser.CharacterClasses._ diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala index 172e6f9..58e05b6 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/headers/headers.scala @@ -640,7 +640,7 @@ final case class RawHeader(name: String, value: String) extends jm.headers.RawHe } object RawHeader { def unapply[H <: HttpHeader](customHeader: H): Option[(String, String)] = - Some(customHeader.name -> customHeader.value) + Some(customHeader.name → customHeader.value) } object `Raw-Request-URI` extends ModeledCompanion[`Raw-Request-URI`] diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala index 336082e..e3e3779 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/UpgradeToWebSocket.scala @@ -35,7 +35,7 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket { * Optionally, a subprotocol out of the ones requested by the client can be chosen. */ def handleMessages(handlerFlow: Graph[FlowShape[Message, Message], Any], - subprotocol: Option[String] = None): HttpResponse + subprotocol: Option[String] = None): HttpResponse /** * The high-level interface to create a WebSocket server based on "messages". @@ -47,9 +47,9 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket { * * Optionally, a subprotocol out of the ones requested by the client can be chosen. 
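On the server side, the synthetic UpgradeToWebSocket header above is the hook: finding it on a request and answering via handleMessages performs the upgrade. A sketch with an identity (echo) flow:

import akka.http.scaladsl.model._
import akka.http.scaladsl.model.ws.{ Message, UpgradeToWebSocket }
import akka.stream.scaladsl.Flow

object WebSocketUpgradeSketch {
  val echo = Flow[Message] // identity flow: every message is echoed back

  def handle(request: HttpRequest): HttpResponse =
    request.header[UpgradeToWebSocket] match {
      case Some(upgrade) ⇒ upgrade.handleMessages(echo) // subprotocol stays None
      case None          ⇒ HttpResponse(StatusCodes.BadRequest, entity = "Expected WebSocket request")
    }
}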
*/ - def handleMessagesWithSinkSource(inSink: Graph[SinkShape[Message], Any], - outSource: Graph[SourceShape[Message], Any], - subprotocol: Option[String] = None): HttpResponse = + def handleMessagesWithSinkSource(inSink: Graph[SinkShape[Message], Any], + outSource: Graph[SourceShape[Message], Any], + subprotocol: Option[String] = None): HttpResponse = handleMessages(scaladsl.Flow.fromSinkAndSource(inSink, outSource), subprotocol) import scala.collection.JavaConverters._ @@ -80,8 +80,8 @@ trait UpgradeToWebSocket extends jm.ws.UpgradeToWebSocket { /** * Java API */ - def handleMessagesWith(inSink: Graph[SinkShape[jm.ws.Message], _ <: Any], - outSource: Graph[SourceShape[jm.ws.Message], _ <: Any], + def handleMessagesWith(inSink: Graph[SinkShape[jm.ws.Message], _ <: Any], + outSource: Graph[SourceShape[jm.ws.Message], _ <: Any], subprotocol: String): HttpResponse = handleMessages(createScalaFlow(inSink, outSource), subprotocol = Some(subprotocol)) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala index 89e482b..9a76e7f 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/model/ws/WebSocketRequest.scala @@ -17,9 +17,9 @@ import akka.http.scaladsl.model.{ HttpHeader, Uri } * @param subprotocol A WebSocket subprotocol if required. */ final case class WebSocketRequest( - uri: Uri, + uri: Uri, extraHeaders: immutable.Seq[HttpHeader] = Nil, - subprotocol: Option[String] = None) + subprotocol: Option[String] = None) object WebSocketRequest { implicit def fromTargetUri(uri: Uri): WebSocketRequest = WebSocketRequest(uri) implicit def fromTargetUriString(uriString: String): WebSocketRequest = WebSocketRequest(uriString) diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala index e941532..e56c7d8 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/settings/ParserSettings.scala @@ -83,11 +83,11 @@ abstract class ParserSettings private[akka] () extends akka.http.javadsl.setting def withErrorLoggingVerbosity(newValue: ParserSettings.ErrorLoggingVerbosity): ParserSettings = self.copy(errorLoggingVerbosity = newValue) def withHeaderValueCacheLimits(newValue: Map[String, Int]): ParserSettings = self.copy(headerValueCacheLimits = newValue) def withCustomMethods(methods: HttpMethod*): ParserSettings = { - val map = methods.map(m ⇒ m.name -> m).toMap + val map = methods.map(m ⇒ m.name → m).toMap self.copy(customMethods = map.get) } def withCustomStatusCodes(codes: StatusCode*): ParserSettings = { - val map = codes.map(c ⇒ c.intValue -> c).toMap + val map = codes.map(c ⇒ c.intValue → c).toMap self.copy(customStatusCodes = map.get) } } diff --git a/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala b/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala index fad9734..03a78c6 100644 --- a/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala +++ b/akka-http-core/src/main/scala/akka/http/scaladsl/util/FastFuture.scala @@ -80,9 +80,9 @@ object FastFuture { def isCompleted = true def result(atMost: Duration)(implicit permit: CanAwait) = a def ready(atMost: Duration)(implicit permit: CanAwait) = this - def transform[S](f: 
scala.util.Try[A] => scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = + def transform[S](f: scala.util.Try[A] ⇒ scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = FastFuture(f(Success(a))) - def transformWith[S](f: scala.util.Try[A] => scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = + def transformWith[S](f: scala.util.Try[A] ⇒ scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = new FastFuture(this).transformWith(f) } private case class ErrorFuture(error: Throwable) extends Future[Nothing] { @@ -91,9 +91,9 @@ object FastFuture { def isCompleted = true def result(atMost: Duration)(implicit permit: CanAwait) = throw error def ready(atMost: Duration)(implicit permit: CanAwait) = this - def transform[S](f: scala.util.Try[Nothing] => scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = + def transform[S](f: scala.util.Try[Nothing] ⇒ scala.util.Try[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = FastFuture(f(Failure(error))) - def transformWith[S](f: scala.util.Try[Nothing] => scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = + def transformWith[S](f: scala.util.Try[Nothing] ⇒ scala.concurrent.Future[S])(implicit executor: scala.concurrent.ExecutionContext): scala.concurrent.Future[S] = new FastFuture(this).transformWith(f) } diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala index 3114b15..b2db3a4 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/ConnectionPoolSpec.scala @@ -57,7 +57,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" "properly complete a simple request/response cycle" in new TestSetup { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]() - requestIn.sendNext(HttpRequest(uri = "/") -> 42) + requestIn.sendNext(HttpRequest(uri = "/") → 42) responseOutSub.request(1) acceptIncomingConnection() @@ -68,8 +68,8 @@ class ConnectionPoolSpec extends AkkaSpec(""" "open a second connection if the first one is loaded" in new TestSetup { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]() - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) - requestIn.sendNext(HttpRequest(uri = "/b") -> 43) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) + requestIn.sendNext(HttpRequest(uri = "/b") → 43) responseOutSub.request(2) acceptIncomingConnection() @@ -97,7 +97,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" case x ⇒ super.testServerHandler(connNr)(x) } - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) responseOutSub.request(1) acceptIncomingConnection() val (Success(r1), 42) = responseOut.expectNext() @@ -107,7 +107,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" responseEntityPub.sendNext(ByteString("YEAH")) responseEntityProbe.expectNext(ByteString("YEAH")) - requestIn.sendNext(HttpRequest(uri = "/b") -> 43) + requestIn.sendNext(HttpRequest(uri = "/b") → 43) responseOutSub.request(1) acceptIncomingConnection() val (Success(r2), 43) = responseOut.expectNext() @@ 
-117,13 +117,13 @@ class ConnectionPoolSpec extends AkkaSpec(""" "not open a second connection if there is an idle one available" in new TestSetup { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]() - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) responseOutSub.request(1) acceptIncomingConnection() val (Success(response1), 42) = responseOut.expectNext() connNr(response1) shouldEqual 1 - requestIn.sendNext(HttpRequest(uri = "/b") -> 43) + requestIn.sendNext(HttpRequest(uri = "/b") → 43) responseOutSub.request(1) val (Success(response2), 43) = responseOut.expectNext() connNr(response2) shouldEqual 1 @@ -135,7 +135,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" val N = 500 val requestIds = Source.fromIterator(() ⇒ Iterator.from(1)).take(N) - val idSum = requestIds.map(id ⇒ HttpRequest(uri = s"/r$id") -> id).via(poolFlow).map { + val idSum = requestIds.map(id ⇒ HttpRequest(uri = s"/r$id") → id).via(poolFlow).map { case (Success(response), id) ⇒ requestUri(response) should endWith(s"/r$id") id @@ -153,8 +153,8 @@ class ConnectionPoolSpec extends AkkaSpec(""" "properly surface connection-level errors" in new TestSetup(autoAccept = true) { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int](maxRetries = 0) - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) - requestIn.sendNext(HttpRequest(uri = "/crash") -> 43) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) + requestIn.sendNext(HttpRequest(uri = "/crash") → 43) responseOutSub.request(2) override def mapServerSideOutboundRawBytes(bytes: ByteString): ByteString = @@ -169,8 +169,8 @@ class ConnectionPoolSpec extends AkkaSpec(""" "retry failed requests" in new TestSetup(autoAccept = true) { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int]() - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) - requestIn.sendNext(HttpRequest(uri = "/crash") -> 43) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) + requestIn.sendNext(HttpRequest(uri = "/crash") → 43) responseOutSub.request(2) val remainingResponsesToKill = new AtomicInteger(1) @@ -188,8 +188,8 @@ class ConnectionPoolSpec extends AkkaSpec(""" "respect the configured `maxRetries` value" in new TestSetup(autoAccept = true) { val (requestIn, responseOut, responseOutSub, hcp) = cachedHostConnectionPool[Int](maxRetries = 4) - requestIn.sendNext(HttpRequest(uri = "/a") -> 42) - requestIn.sendNext(HttpRequest(uri = "/crash") -> 43) + requestIn.sendNext(HttpRequest(uri = "/a") → 42) + requestIn.sendNext(HttpRequest(uri = "/crash") → 43) responseOutSub.request(2) val remainingResponsesToKill = new AtomicInteger(5) @@ -222,7 +222,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" val PoolGateway.Running(_, _, shutdownCompletedPromise) = gateway.currentState Await.result(shutdownCompletedPromise.future, 1500.millis) // verify shutdown completed - requestIn.sendNext(HttpRequest(uri = "/") -> 42) + requestIn.sendNext(HttpRequest(uri = "/") → 42) responseOutSub.request(1) acceptIncomingConnection() @@ -272,8 +272,8 @@ class ConnectionPoolSpec extends AkkaSpec(""" val (requestIn, responseOut, responseOutSub, hcp) = superPool[Int]() - requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName:$serverPort/a") -> 42) - requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName2:$serverPort2/b") -> 43) + requestIn.sendNext(HttpRequest(uri = s"http://$serverHostName:$serverPort/a") → 42) + requestIn.sendNext(HttpRequest(uri = 
s"http://$serverHostName2:$serverPort2/b") → 43) responseOutSub.request(2) Seq(responseOut.expectNext(), responseOut.expectNext()) foreach { @@ -285,7 +285,7 @@ class ConnectionPoolSpec extends AkkaSpec(""" } class TestSetup(serverSettings: ServerSettings = ServerSettings(system), - autoAccept: Boolean = false) { + autoAccept: Boolean = false) { val (serverEndpoint, serverHostName, serverPort) = TestUtils.temporaryServerHostnameAndPort() def testServerHandler(connNr: Int): HttpRequest ⇒ HttpResponse = { @@ -322,23 +322,23 @@ class ConnectionPoolSpec extends AkkaSpec(""" private def handleConnection(c: Http.IncomingConnection) = c.handleWithSyncHandler(testServerHandler(incomingConnectionCounter.incrementAndGet())) - def cachedHostConnectionPool[T](maxConnections: Int = 2, - maxRetries: Int = 2, - maxOpenRequests: Int = 8, - pipeliningLimit: Int = 1, - idleTimeout: Duration = 5.seconds, - ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = { + def cachedHostConnectionPool[T](maxConnections: Int = 2, + maxRetries: Int = 2, + maxOpenRequests: Int = 8, + pipeliningLimit: Int = 1, + idleTimeout: Duration = 5.seconds, + ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = { val settings = new ConnectionPoolSettingsImpl(maxConnections, maxRetries, maxOpenRequests, pipeliningLimit, idleTimeout, ClientConnectionSettings(system)) flowTestBench(Http().cachedHostConnectionPool[T](serverHostName, serverPort, settings)) } - def superPool[T](maxConnections: Int = 2, - maxRetries: Int = 2, - maxOpenRequests: Int = 8, - pipeliningLimit: Int = 1, - idleTimeout: Duration = 5.seconds, - ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = { + def superPool[T](maxConnections: Int = 2, + maxRetries: Int = 2, + maxOpenRequests: Int = 8, + pipeliningLimit: Int = 1, + idleTimeout: Duration = 5.seconds, + ccSettings: ClientConnectionSettings = ClientConnectionSettings(system)) = { val settings = new ConnectionPoolSettingsImpl(maxConnections, maxRetries, maxOpenRequests, pipeliningLimit, idleTimeout, ClientConnectionSettings(system)) flowTestBench(Http().superPool[T](settings = settings)) diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala index 1c7ed51..d15de9f 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/LowLevelOutgoingConnectionSpec.scala @@ -11,7 +11,7 @@ import org.scalatest.concurrent.ScalaFutures import akka.http.scaladsl.settings.ClientConnectionSettings import akka.util.ByteString import akka.event.NoLogging -import akka.stream.{ClosedShape, ActorMaterializer} +import akka.stream.{ ClosedShape, ActorMaterializer } import akka.stream.TLSProtocol._ import akka.stream.testkit._ import akka.stream.scaladsl._ @@ -166,7 +166,7 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka. |""") inside(expectResponse()) { - case HttpResponse(StatusCodes.OK, _, HttpEntity.Chunked(_, data), _) => + case HttpResponse(StatusCodes.OK, _, HttpEntity.Chunked(_, data), _) ⇒ val dataProbe = TestSubscriber.manualProbe[ChunkStreamPart] // but only one consumed by server data.take(1).to(Sink.fromSubscriber(dataProbe)).run() @@ -180,7 +180,7 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka. 
} "proceed to next response once previous response's entity has been drained" in new TestSetup with ScalaFutures { - def twice(action: => Unit): Unit = { action; action } + def twice(action: ⇒ Unit): Unit = { action; action } twice { requestsSub.sendNext(HttpRequest()) @@ -203,11 +203,10 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka. |""") val whenComplete = expectResponse().entity.dataBytes.runWith(Sink.ignore) - whenComplete.futureValue should be (akka.Done) + whenComplete.futureValue should be(akka.Done) } } - "handle several requests on one persistent connection" which { "has a first response that was chunked" in new TestSetup { requestsSub.sendNext(HttpRequest()) @@ -601,17 +600,16 @@ class LowLevelOutgoingConnectionSpec extends AkkaSpec("akka.loggers = []\n akka. val netOut = TestSubscriber.manualProbe[ByteString] val netIn = TestPublisher.manualProbe[ByteString]() - RunnableGraph.fromGraph(GraphDSL.create(OutgoingConnectionBlueprint(Host("example.com"), settings, NoLogging)) { implicit b ⇒ - client ⇒ - import GraphDSL.Implicits._ - Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2 - client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> Sink.fromSubscriber(netOut) - Source.fromPublisher(requests) ~> client.in1 - client.out2 ~> Sink.fromSubscriber(responses) - ClosedShape + RunnableGraph.fromGraph(GraphDSL.create(OutgoingConnectionBlueprint(Host("example.com"), settings, NoLogging)) { implicit b ⇒ client ⇒ + import GraphDSL.Implicits._ + Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2 + client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> Sink.fromSubscriber(netOut) + Source.fromPublisher(requests) ~> client.in1 + client.out2 ~> Sink.fromSubscriber(responses) + ClosedShape }).run() - netOut -> netIn + netOut → netIn } def wipeDate(string: String) = diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala index 1b0492e..2b2ce8c 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/client/TlsEndpointVerificationSpec.scala @@ -99,7 +99,7 @@ class TlsEndpointVerificationSpec extends AkkaSpec(""" } val serverSideTls = Http().sslTlsStage(ExampleHttpContexts.exampleServerContext, Server) - val clientSideTls = Http().sslTlsStage(clientContext, Client, Some(hostname -> 8080)) + val clientSideTls = Http().sslTlsStage(clientContext, Client, Some(hostname → 8080)) val server = Http().serverLayer() diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala index 87ae14a..4282538 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/HttpHeaderParserSpec.scala @@ -33,11 +33,11 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll check { """nodes: 0/H, 0/e, 0/l, 0/l, 0/o, 1/Ω |branchData:\u0020 - |values: 'Hello""" -> parser.formatRawTrie + |values: 'Hello""" → parser.formatRawTrie } check { """-H-e-l-l-o- 'Hello - |""" -> parser.formatTrie + |""" → parser.formatTrie } } @@ -47,12 +47,12 @@ class HttpHeaderParserSpec extends 
WordSpec with Matchers with BeforeAndAfterAll check { """nodes: 0/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω |branchData: 6/2/0 - |values: 'Hello, 'Hallo""" -> parser.formatRawTrie + |values: 'Hello, 'Hallo""" → parser.formatRawTrie } check { """ ┌─a-l-l-o- 'Hallo |-H-e-l-l-o- 'Hello - |""" -> parser.formatTrie + |""" → parser.formatTrie } } @@ -63,13 +63,13 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll check { """nodes: 2/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω, 0/Y, 0/e, 0/a, 0/h, 3/Ω |branchData: 6/2/0, 0/1/11 - |values: 'Hello, 'Hallo, 'Yeah""" -> parser.formatRawTrie + |values: 'Hello, 'Hallo, 'Yeah""" → parser.formatRawTrie } check { """ ┌─a-l-l-o- 'Hallo |-H-e-l-l-o- 'Hello | └─Y-e-a-h- 'Yeah - |""" -> parser.formatTrie + |""" → parser.formatTrie } } @@ -81,14 +81,14 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll check { """nodes: 2/H, 1/e, 0/l, 0/l, 0/o, 1/Ω, 0/a, 0/l, 0/l, 0/o, 2/Ω, 0/Y, 0/e, 0/a, 0/h, 3/Ω, 0/o, 0/o, 4/Ω |branchData: 6/2/16, 0/1/11 - |values: 'Hello, 'Hallo, 'Yeah, 'Hoo""" -> parser.formatRawTrie + |values: 'Hello, 'Hallo, 'Yeah, 'Hoo""" → parser.formatRawTrie } check { """ ┌─a-l-l-o- 'Hallo |-H-e-l-l-o- 'Hello | | └─o-o- 'Hoo | └─Y-e-a-h- 'Yeah - |""" -> parser.formatTrie + |""" → parser.formatTrie } } @@ -103,7 +103,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll |-H-e-l-l-o- 'Hello | | └─o-o- 'Foo | └─Y-e-a-h- 'Yeah - |""" -> parser.formatTrie + |""" → parser.formatTrie } } @@ -142,7 +142,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll check { """ ┌─f-a-n-c-y---p-a-n-t-s-:-(Fancy-Pants)- -f-o-o-\r-\n- *Fancy-Pants: foo |-h-e-l-l-o-:- -b-o-b- 'Hello - |""" -> parser.formatTrie + |""" → parser.formatTrie } ixA shouldEqual ixB headerA shouldEqual RawHeader("Fancy-Pants", "foo") @@ -253,7 +253,7 @@ class HttpHeaderParserSpec extends WordSpec with Matchers with BeforeAndAfterAll if (parser.isEmpty) HttpHeaderParser.insertRemainingCharsAsNewNodes(parser, ByteString(line), value) else HttpHeaderParser.insert(parser, ByteString(line), value) - def parseLine(line: String) = parser.parseHeaderLine(ByteString(line))() -> parser.resultHeader + def parseLine(line: String) = parser.parseHeaderLine(ByteString(line))() → parser.resultHeader def parseAndCache(lineA: String)(lineB: String = lineA): HttpHeader = { val (ixA, headerA) = parseLine(lineA) diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala index ebea46c..83077a5 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/parsing/RequestParserSpec.scala @@ -475,7 +475,7 @@ class RequestParserSpec extends FreeSpec with Matchers with BeforeAndAfterAll { def generalRawMultiParseTo(expected: Either[RequestOutput, HttpRequest]*): Matcher[Seq[String]] = generalRawMultiParseTo(newParser, expected: _*) - def generalRawMultiParseTo(parser: HttpRequestParser, + def generalRawMultiParseTo(parser: HttpRequestParser, expected: Either[RequestOutput, HttpRequest]*): Matcher[Seq[String]] = equal(expected.map(strictEqualify)) .matcher[Seq[Either[RequestOutput, StrictEqualHttpRequest]]] compose multiParse(parser) diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala 
b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala index 6b1afd8..6a787e8 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/RequestRendererSpec.scala @@ -319,7 +319,7 @@ class RequestRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll override def afterAll() = system.terminate() class TestSetup(val userAgent: Option[`User-Agent`] = Some(`User-Agent`("akka-http/1.0.0")), - serverAddress: InetSocketAddress = new InetSocketAddress("test.com", 8080)) + serverAddress: InetSocketAddress = new InetSocketAddress("test.com", 8080)) extends HttpRequestRendererFactory(userAgent, requestHeaderSizeHint = 64, NoLogging) { def renderTo(expected: String): Matcher[HttpRequest] = diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala index 421fdfb..62cf194 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/rendering/ResponseRendererSpec.scala @@ -189,7 +189,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll "status 200 and a custom Transfer-Encoding header" in new TestSetup() { HttpResponse(headers = List(`Transfer-Encoding`(TransferEncodings.Extension("fancy"))), entity = "All good") should renderTo { - """HTTP/1.1 200 OK + """HTTP/1.1 200 OK |Transfer-Encoding: fancy |Server: akka-http/1.0.0 |Date: Thu, 25 Aug 2011 09:10:29 GMT @@ -197,7 +197,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll |Content-Length: 8 | |All good""" - } + } } } "a response with a Default (streamed with explicit content-length body," - { @@ -585,7 +585,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll renderTo(expected, close = false) compose (ResponseRenderingContext(_)) def renderTo(expected: String, close: Boolean): Matcher[ResponseRenderingContext] = - equal(expected.stripMarginWithNewline("\r\n") -> close).matcher[(String, Boolean)] compose { ctx ⇒ + equal(expected.stripMarginWithNewline("\r\n") → close).matcher[(String, Boolean)] compose { ctx ⇒ val (wasCompletedFuture, resultFuture) = (Source.single(ctx) ++ Source.maybe[ResponseRenderingContext]) // never send upstream completion .via(renderer.named("renderer")) @@ -605,7 +605,7 @@ class ResponseRendererSpec extends FreeSpec with Matchers with BeforeAndAfterAll } catch { case NonFatal(_) ⇒ false } - Await.result(resultFuture, 250.millis).reduceLeft(_ ++ _).utf8String -> wasCompleted + Await.result(resultFuture, 250.millis).reduceLeft(_ ++ _).utf8String → wasCompleted } override def currentTimeMillis() = DateTime(2011, 8, 25, 9, 10, 29).clicks // provide a stable date for testing diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala index 364febb..9986ccb 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerSpec.scala @@ -355,7 +355,7 @@ class HttpServerSpec extends AkkaSpec( } "proceed to next request once previous request's entity has been drained" in new TestSetup with ScalaFutures { - def twice(action: => Unit): Unit = { 
action; action } + def twice(action: ⇒ Unit): Unit = { action; action } twice { send("""POST / HTTP/1.1 @@ -369,7 +369,7 @@ class HttpServerSpec extends AkkaSpec( |""") val whenComplete = expectRequest().entity.dataBytes.runWith(Sink.ignore) - whenComplete.futureValue should be (akka.Done) + whenComplete.futureValue should be(akka.Done) } } diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala index 39a681a..de92f24 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/server/HttpServerTestSetupBase.scala @@ -34,17 +34,16 @@ abstract class HttpServerTestSetupBase { val netIn = TestPublisher.probe[ByteString]() val netOut = ByteStringSinkProbe() - RunnableGraph.fromGraph(GraphDSL.create(HttpServerBluePrint(settings, remoteAddress = remoteAddress, log = NoLogging)) { implicit b ⇒ - server ⇒ - import GraphDSL.Implicits._ - Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> server.in2 - server.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x }.buffer(1, OverflowStrategy.backpressure) ~> netOut.sink - server.out2 ~> Sink.fromSubscriber(requests) - Source.fromPublisher(responses) ~> server.in1 - ClosedShape + RunnableGraph.fromGraph(GraphDSL.create(HttpServerBluePrint(settings, remoteAddress = remoteAddress, log = NoLogging)) { implicit b ⇒ server ⇒ + import GraphDSL.Implicits._ + Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> server.in2 + server.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x }.buffer(1, OverflowStrategy.backpressure) ~> netOut.sink + server.out2 ~> Sink.fromSubscriber(requests) + Source.fromPublisher(responses) ~> server.in1 + ClosedShape }).run() - netIn -> netOut + netIn → netOut } def expectResponseWithWipedDate(expected: String): Unit = { diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala index e507d18..08ed8a2 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/MessageSpec.scala @@ -23,7 +23,7 @@ class MessageSpec extends FreeSpec with Matchers with WithMaterializerSpec { val InvalidUtf8TwoByteSequence: ByteString = ByteString( (128 + 64).toByte, // start two byte sequence 0 // but don't finish it - ) + ) "The WebSocket implementation should" - { "collect messages from frames" - { diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala index 44c4dbe..b22bfe2 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSClientAutobahnTest.scala @@ -91,7 +91,7 @@ object WSClientAutobahnTest extends App { val res = getCaseCount().flatMap { count ⇒ println(s"Retrieving case info for $count cases...") - Future.traverse(1 to count)(getCaseInfo).map(_.map(e ⇒ e.caseInfo.id -> e).toMap) + Future.traverse(1 to count)(getCaseInfo).map(_.map(e ⇒ e.caseInfo.id → e).toMap) } res.foreach { res ⇒ println(s"Received info for ${res.size} cases") diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala 
b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala index 72a27b3..90169c3 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestSetupBase.scala @@ -17,12 +17,12 @@ trait WSTestSetupBase extends Matchers { def expectBytes(bytes: ByteString): Unit def sendWSFrame(opcode: Opcode, - data: ByteString, - fin: Boolean, - mask: Boolean = false, - rsv1: Boolean = false, - rsv2: Boolean = false, - rsv3: Boolean = false): Unit = { + data: ByteString, + fin: Boolean, + mask: Boolean = false, + rsv1: Boolean = false, + rsv2: Boolean = false, + rsv3: Boolean = false): Unit = { val (theMask, theData) = if (mask) { val m = Random.nextInt() @@ -35,12 +35,12 @@ trait WSTestSetupBase extends Matchers { send(closeFrame(closeCode, mask)) def expectWSFrame(opcode: Opcode, - data: ByteString, - fin: Boolean, - mask: Option[Int] = None, - rsv1: Boolean = false, - rsv2: Boolean = false, - rsv3: Boolean = false): Unit = + data: ByteString, + fin: Boolean, + mask: Option[Int] = None, + rsv1: Boolean = false, + rsv2: Boolean = false, + rsv3: Boolean = false): Unit = expectBytes(frameHeader(opcode, data.length, fin, mask, rsv1, rsv2, rsv3) ++ data) def expectWSCloseFrame(closeCode: Int, mask: Boolean = false): Unit = diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala index f608bf5..3768c13 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WSTestUtils.scala @@ -13,11 +13,11 @@ object WSTestUtils { def frameHeader( opcode: Opcode, length: Long, - fin: Boolean, - mask: Option[Int] = None, - rsv1: Boolean = false, - rsv2: Boolean = false, - rsv3: Boolean = false): ByteString = { + fin: Boolean, + mask: Option[Int] = None, + rsv1: Boolean = false, + rsv2: Boolean = false, + rsv3: Boolean = false): ByteString = { def set(should: Boolean, mask: Int): Int = if (should) mask else 0 diff --git a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala index 2c0c4ed..8b9d110 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/engine/ws/WebSocketClientSpec.scala @@ -312,13 +312,12 @@ class WebSocketClientSpec extends FreeSpec with Matchers with WithMaterializerSp val netIn = TestPublisher.probe[ByteString]() val graph = - RunnableGraph.fromGraph(GraphDSL.create(clientLayer) { implicit b ⇒ - client ⇒ - import GraphDSL.Implicits._ - Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2 - client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> netOut.sink - client.out2 ~> clientImplementation ~> client.in1 - ClosedShape + RunnableGraph.fromGraph(GraphDSL.create(clientLayer) { implicit b ⇒ client ⇒ + import GraphDSL.Implicits._ + Source.fromPublisher(netIn) ~> Flow[ByteString].map(SessionBytes(null, _)) ~> client.in2 + client.out1 ~> Flow[SslTlsOutbound].collect { case SendBytes(x) ⇒ x } ~> netOut.sink + client.out2 ~> clientImplementation ~> client.in1 + ClosedShape }) val response = graph.run() diff --git a/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala 
b/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala index 9a2ac59..088f6c2 100644 --- a/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/impl/model/parser/HttpHeaderSpec.scala @@ -38,10 +38,10 @@ class HttpHeaderSpec extends FreeSpec with Matchers { Accept(`application/vnd.spray`) "Accept: */*, text/*; foo=bar, custom/custom; bar=\"b>az\"" =!= Accept(`*/*`, - MediaRange.custom("text", Map("foo" -> "bar")), - MediaType.customBinary("custom", "custom", MediaType.Compressible, params = Map("bar" -> "b>az"))) + MediaRange.custom("text", Map("foo" → "bar")), + MediaType.customBinary("custom", "custom", MediaType.Compressible, params = Map("bar" → "b>az"))) "Accept: application/*+xml; version=2" =!= - Accept(MediaType.customBinary("application", "*+xml", MediaType.Compressible, params = Map("version" -> "2"))) + Accept(MediaType.customBinary("application", "*+xml", MediaType.Compressible, params = Map("version" → "2"))) } "Accept-Charset" in { @@ -135,14 +135,14 @@ class HttpHeaderSpec extends FreeSpec with Matchers { Authorization(BasicHttpCredentials("Aladdin", "open sesame")).renderedTo( "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==") """Authorization: Fancy yes="n:o", nonce=42""" =!= - Authorization(GenericHttpCredentials("Fancy", Map("yes" -> "n:o", "nonce" -> "42"))).renderedTo( + Authorization(GenericHttpCredentials("Fancy", Map("yes" → "n:o", "nonce" → "42"))).renderedTo( """Fancy yes="n:o",nonce=42""") """Authorization: Fancy yes=no,nonce="4\\2"""" =!= - Authorization(GenericHttpCredentials("Fancy", Map("yes" -> "no", "nonce" -> """4\2"""))) + Authorization(GenericHttpCredentials("Fancy", Map("yes" → "no", "nonce" → """4\2"""))) "Authorization: Basic Qm9iOg==" =!= Authorization(BasicHttpCredentials("Bob", "")) """Authorization: Digest name=Bob""" =!= - Authorization(GenericHttpCredentials("Digest", Map("name" -> "Bob"))) + Authorization(GenericHttpCredentials("Digest", Map("name" → "Bob"))) """Authorization: Bearer mF_9.B5f-4.1JqM/""" =!= Authorization(OAuth2BearerToken("mF_9.B5f-4.1JqM/")) "Authorization: NoParamScheme" =!= @@ -179,7 +179,7 @@ class HttpHeaderSpec extends FreeSpec with Matchers { "Content-Disposition" in { "Content-Disposition: form-data" =!= `Content-Disposition`(ContentDispositionTypes.`form-data`) "Content-Disposition: attachment; name=field1; filename=\"file/txt\"" =!= - `Content-Disposition`(ContentDispositionTypes.attachment, Map("name" -> "field1", "filename" -> "file/txt")) + `Content-Disposition`(ContentDispositionTypes.attachment, Map("name" → "field1", "filename" → "file/txt")) } "Content-Encoding" in { @@ -201,7 +201,7 @@ class HttpHeaderSpec extends FreeSpec with Matchers { "Content-Type: text/plain; charset=utf8" =!= `Content-Type`(ContentType(`text/plain`, `UTF-8`)).renderedTo("text/plain; charset=UTF-8") "Content-Type: text/xml2; version=3; charset=windows-1252" =!= - `Content-Type`(MediaType.customWithOpenCharset("text", "xml2", params = Map("version" -> "3")) + `Content-Type`(MediaType.customWithOpenCharset("text", "xml2", params = Map("version" → "3")) withCharset HttpCharsets.getForKey("windows-1252").get) "Content-Type: text/plain; charset=fancy-pants" =!= `Content-Type`(`text/plain` withCharset HttpCharset.custom("fancy-pants")) @@ -224,17 +224,17 @@ class HttpHeaderSpec extends FreeSpec with Matchers { } "Cookie (RFC 6265)" in { - "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" -> "31d4d96e407aad42") - "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= 
Cookie("SID" -> "31d4d96e407aad42", "lang" -> "en>US") - "Cookie: a=1; b=2" =!= Cookie("a" -> "1", "b" -> "2") - "Cookie: a=1;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2") - "Cookie: a=1 ;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2") + "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" → "31d4d96e407aad42") + "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" → "31d4d96e407aad42", "lang" → "en>US") + "Cookie: a=1; b=2" =!= Cookie("a" → "1", "b" → "2") + "Cookie: a=1;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2") + "Cookie: a=1 ;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2") - "Cookie: z=0;a=1,b=2" =!= Cookie("z" -> "0").renderedTo("z=0") - """Cookie: a=1;b="test"""" =!= Cookie("a" -> "1", "b" -> "test").renderedTo("a=1; b=test") + "Cookie: z=0;a=1,b=2" =!= Cookie("z" → "0").renderedTo("z=0") + """Cookie: a=1;b="test"""" =!= Cookie("a" → "1", "b" → "test").renderedTo("a=1; b=test") - "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie("a" -> "1", "c" -> "xyz").renderedTo("a=1; c=xyz") - "Cookie: a=1; b=ä; c=d" =!= Cookie("a" -> "1", "c" -> "d").renderedTo("a=1; c=d") + "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie("a" → "1", "c" → "xyz").renderedTo("a=1; c=xyz") + "Cookie: a=1; b=ä; c=d" =!= Cookie("a" → "1", "c" → "d").renderedTo("a=1; c=d") "Cookie: a=1,2" =!= ErrorInfo( @@ -243,16 +243,16 @@ class HttpHeaderSpec extends FreeSpec with Matchers { } "Cookie (Raw)" in { - "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" -> "31d4d96e407aad42").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" -> "31d4d96e407aad42", "lang" -> "en>US").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: a=1; b=2" =!= Cookie("a" -> "1", "b" -> "2").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: a=1;b=2" =!= Cookie("a" -> "1", "b" -> "2").renderedTo("a=1; b=2").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: a=1 ;b=2" =!= Cookie(List(HttpCookiePair.raw("a" -> "1 "), HttpCookiePair("b" -> "2"))).renderedTo("a=1 ; b=2").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: SID=31d4d96e407aad42" =!= Cookie("SID" → "31d4d96e407aad42").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: SID=31d4d96e407aad42; lang=en>US" =!= Cookie("SID" → "31d4d96e407aad42", "lang" → "en>US").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: a=1; b=2" =!= Cookie("a" → "1", "b" → "2").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: a=1;b=2" =!= Cookie("a" → "1", "b" → "2").renderedTo("a=1; b=2").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: a=1 ;b=2" =!= Cookie(List(HttpCookiePair.raw("a" → "1 "), HttpCookiePair("b" → "2"))).renderedTo("a=1 ; b=2").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: z=0; a=1,b=2" =!= Cookie(List(HttpCookiePair("z" -> "0"), HttpCookiePair.raw("a" -> "1,b=2"))).withCookieParsingMode(CookieParsingMode.Raw) - """Cookie: a=1;b="test"""" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "\"test\""))).renderedTo("a=1; b=\"test\"").withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "f\"d\"c\""), HttpCookiePair("c" -> "xyz"))).withCookieParsingMode(CookieParsingMode.Raw) - "Cookie: a=1; b=ä; c=d" =!= Cookie(List(HttpCookiePair("a" -> "1"), HttpCookiePair.raw("b" -> "ä"), HttpCookiePair("c" -> "d"))).withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: z=0; a=1,b=2" =!= Cookie(List(HttpCookiePair("z" → "0"), 
HttpCookiePair.raw("a" → "1,b=2"))).withCookieParsingMode(CookieParsingMode.Raw) + """Cookie: a=1;b="test"""" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "\"test\""))).renderedTo("a=1; b=\"test\"").withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: a=1; b=f\"d\"c\"; c=xyz" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "f\"d\"c\""), HttpCookiePair("c" → "xyz"))).withCookieParsingMode(CookieParsingMode.Raw) + "Cookie: a=1; b=ä; c=d" =!= Cookie(List(HttpCookiePair("a" → "1"), HttpCookiePair.raw("b" → "ä"), HttpCookiePair("c" → "d"))).withCookieParsingMode(CookieParsingMode.Raw) } "Date" in { @@ -367,12 +367,12 @@ class HttpHeaderSpec extends FreeSpec with Matchers { "Proxy-Authenticate" in { "Proxy-Authenticate: Basic realm=\"WallyWorld\",attr=\"val>ue\", Fancy realm=\"yeah\"" =!= - `Proxy-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" -> "val>ue")), HttpChallenge("Fancy", "yeah")) + `Proxy-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" → "val>ue")), HttpChallenge("Fancy", "yeah")) } "Proxy-Authorization" in { """Proxy-Authorization: Fancy yes=no,nonce="4\\2"""" =!= - `Proxy-Authorization`(GenericHttpCredentials("Fancy", Map("yes" -> "no", "nonce" -> """4\2"""))) + `Proxy-Authorization`(GenericHttpCredentials("Fancy", Map("yes" → "no", "nonce" → """4\2"""))) } "Referer" in { @@ -407,19 +407,19 @@ class HttpHeaderSpec extends FreeSpec with Matchers { "Sec-WebSocket-Extensions: abc, def" =!= `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc"), WebSocketExtension("def"))) "Sec-WebSocket-Extensions: abc; param=2; use_y, def" =!= - `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" -> "2", "use_y" -> "")), WebSocketExtension("def"))) + `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" → "2", "use_y" → "")), WebSocketExtension("def"))) "Sec-WebSocket-Extensions: abc; param=\",xyz\", def" =!= - `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" -> ",xyz")), WebSocketExtension("def"))) + `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("abc", Map("param" → ",xyz")), WebSocketExtension("def"))) // real examples from https://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-19 "Sec-WebSocket-Extensions: permessage-deflate" =!= `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate"))) "Sec-WebSocket-Extensions: permessage-deflate; client_max_window_bits; server_max_window_bits=10" =!= - `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> "", "server_max_window_bits" -> "10")))) + `Sec-WebSocket-Extensions`(Vector(WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → "", "server_max_window_bits" → "10")))) "Sec-WebSocket-Extensions: permessage-deflate; client_max_window_bits; server_max_window_bits=10, permessage-deflate; client_max_window_bits" =!= `Sec-WebSocket-Extensions`(Vector( - WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> "", "server_max_window_bits" -> "10")), - WebSocketExtension("permessage-deflate", Map("client_max_window_bits" -> "")))) + WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → "", "server_max_window_bits" → "10")), + WebSocketExtension("permessage-deflate", Map("client_max_window_bits" → "")))) } "Sec-WebSocket-Key" in { "Sec-WebSocket-Key: c2Zxb3JpbmgyMzA5dGpoMDIzOWdlcm5vZ2luCg==" =!= `Sec-WebSocket-Key`("c2Zxb3JpbmgyMzA5dGpoMDIzOWdlcm5vZ2luCg==") @@ -537,13 +537,13 @@ class 
HttpHeaderSpec extends FreeSpec with Matchers { qop="auth,auth-int", nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093, opaque=5ccc069c403ebaf9f0171e9517f40e41""".stripMarginWithNewline("\r\n") =!= - `WWW-Authenticate`(HttpChallenge("Digest", "testrealm@host.com", Map("qop" -> "auth,auth-int", - "nonce" -> "dcd98b7102dd2f0e8b11d0f600bfb0c093", "opaque" -> "5ccc069c403ebaf9f0171e9517f40e41"))).renderedTo( + `WWW-Authenticate`(HttpChallenge("Digest", "testrealm@host.com", Map("qop" → "auth,auth-int", + "nonce" → "dcd98b7102dd2f0e8b11d0f600bfb0c093", "opaque" → "5ccc069c403ebaf9f0171e9517f40e41"))).renderedTo( "Digest realm=\"testrealm@host.com\",qop=\"auth,auth-int\",nonce=dcd98b7102dd2f0e8b11d0f600bfb0c093,opaque=5ccc069c403ebaf9f0171e9517f40e41") "WWW-Authenticate: Basic realm=\"WallyWorld\",attr=\"val>ue\", Fancy realm=\"yeah\"" =!= - `WWW-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" -> "val>ue")), HttpChallenge("Fancy", "yeah")) + `WWW-Authenticate`(HttpChallenge("Basic", "WallyWorld", Map("attr" → "val>ue")), HttpChallenge("Fancy", "yeah")) """WWW-Authenticate: Fancy realm="Secure Area",nonce=42""" =!= - `WWW-Authenticate`(HttpChallenge("Fancy", "Secure Area", Map("nonce" -> "42"))) + `WWW-Authenticate`(HttpChallenge("Fancy", "Secure Area", Map("nonce" → "42"))) } "X-Forwarded-For" in { diff --git a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala index 609ed76..aaafb8e 100644 --- a/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/javadsl/model/JavaApiSpec.scala @@ -42,16 +42,16 @@ class JavaApiSpec extends FreeSpec with MustMatchers { } "access parameterMap" in { Uri.create("/abc?name=blub&age=28") - .query().toMap.asScala must contain allOf ("name" -> "blub", "age" -> "28") + .query().toMap.asScala must contain allOf ("name" → "blub", "age" → "28") } "access parameters" in { val Seq(param1, param2, param3) = Uri.create("/abc?name=blub&age=28&name=blub2") .query().toList.asScala.map(_.toScala) - param1 must be("name" -> "blub") - param2 must be("age" -> "28") - param3 must be("name" -> "blub2") + param1 must be("name" → "blub") + param2 must be("age" → "28") + param3 must be("name" → "blub2") } "access single parameter" in { val query = Uri.create("/abc?name=blub").query() diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala index 3b7c7bc..4006f27 100644 --- a/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/scaladsl/ClientServerSpec.scala @@ -248,7 +248,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit def runRequest(uri: Uri): Future[(Try[HttpResponse], Int)] = { val itNeverSends = Chunked.fromData(ContentTypes.`text/plain(UTF-8)`, Source.maybe[ByteString]) - Source.single(HttpRequest(POST, uri, entity = itNeverSends) -> 1) + Source.single(HttpRequest(POST, uri, entity = itNeverSends) → 1) .via(pool) .runWith(Sink.head) } @@ -480,7 +480,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit connection.remoteAddress.getHostName shouldEqual hostname connection.remoteAddress.getPort shouldEqual port - requestPublisherProbe -> responseSubscriberProbe + requestPublisherProbe → responseSubscriberProbe } def acceptConnection(): (TestSubscriber.ManualProbe[HttpRequest], 
TestPublisher.ManualProbe[HttpResponse]) = { @@ -497,7 +497,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit pub.subscribe(requestSubscriberProbe) responsePublisherProbe.subscribe(sub) - requestSubscriberProbe -> responsePublisherProbe + requestSubscriberProbe → responsePublisherProbe } def openClientSocket() = new Socket(hostname, port) @@ -513,7 +513,7 @@ class ClientServerSpec extends WordSpec with Matchers with BeforeAndAfterAll wit val sb = new java.lang.StringBuilder val cbuf = new Array[Char](256) @tailrec def drain(): (String, BufferedReader) = reader.read(cbuf) match { - case -1 ⇒ sb.toString -> reader + case -1 ⇒ sb.toString → reader case n ⇒ sb.append(cbuf, 0, n); drain() } drain() diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala index 2b35b8e..a37f21f 100644 --- a/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/scaladsl/model/MultipartSpec.scala @@ -43,7 +43,7 @@ class MultipartSpec extends WordSpec with Matchers with Inside with BeforeAndAft Multipart.FormData.BodyPart("bar", defaultEntity("BAR")) :: Nil)) val strict = Await.result(streamed.toStrict(1.second), 1.second) - strict shouldEqual Multipart.FormData(Map("foo" -> HttpEntity("FOO"), "bar" -> HttpEntity("BAR"))) + strict shouldEqual Multipart.FormData(Map("foo" → HttpEntity("FOO"), "bar" → HttpEntity("BAR"))) } } diff --git a/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala b/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala index 454bb1d..6f6db56 100644 --- a/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala +++ b/akka-http-core/src/test/scala/akka/http/scaladsl/model/UriSpec.scala @@ -305,29 +305,29 @@ class UriSpec extends WordSpec with Matchers { query.getOrElse("d", "x") shouldEqual "x" query.getAll("b") shouldEqual List("", "4", "2") query.getAll("d") shouldEqual Nil - query.toMap shouldEqual Map("a" -> "1", "b" -> "", "c" -> "3") - query.toMultiMap shouldEqual Map("a" -> List("1"), "b" -> List("", "4", "2"), "c" -> List("3")) - query.toList shouldEqual List("a" -> "1", "b" -> "2", "c" -> "3", "b" -> "4", "b" -> "") - query.toSeq shouldEqual Seq("a" -> "1", "b" -> "2", "c" -> "3", "b" -> "4", "b" -> "") + query.toMap shouldEqual Map("a" → "1", "b" → "", "c" → "3") + query.toMultiMap shouldEqual Map("a" → List("1"), "b" → List("", "4", "2"), "c" → List("3")) + query.toList shouldEqual List("a" → "1", "b" → "2", "c" → "3", "b" → "4", "b" → "") + query.toSeq shouldEqual Seq("a" → "1", "b" → "2", "c" → "3", "b" → "4", "b" → "") } "support conversion from list of name/value pairs" in { import Query._ - val pairs = List("key1" -> "value1", "key2" -> "value2", "key3" -> "value3") + val pairs = List("key1" → "value1", "key2" → "value2", "key3" → "value3") Query(pairs: _*).toList.diff(pairs) shouldEqual Nil Query() shouldEqual Empty - Query("k" -> "v") shouldEqual ("k" -> "v") +: Empty + Query("k" → "v") shouldEqual ("k" → "v") +: Empty } "encode special separators in query parameter names" in { - Query("a=b" -> "c").toString() shouldEqual "a%3Db=c" - Query("a&b" -> "c").toString() shouldEqual "a%26b=c" - Query("a+b" -> "c").toString() shouldEqual "a%2Bb=c" - Query("a;b" -> "c").toString() shouldEqual "a%3Bb=c" + Query("a=b" → "c").toString() shouldEqual "a%3Db=c" + Query("a&b" → "c").toString() shouldEqual "a%26b=c" + Query("a+b" → 
"c").toString() shouldEqual "a%2Bb=c" + Query("a;b" → "c").toString() shouldEqual "a%3Bb=c" } "encode special separators in query parameter values" in { - Query("a" -> "b=c").toString() shouldEqual "a=b%3Dc" - Query("a" -> "b&c").toString() shouldEqual "a=b%26c" - Query("a" -> "b+c").toString() shouldEqual "a=b%2Bc" - Query("a" -> "b;c").toString() shouldEqual "a=b%3Bc" + Query("a" → "b=c").toString() shouldEqual "a=b%3Dc" + Query("a" → "b&c").toString() shouldEqual "a=b%26c" + Query("a" → "b+c").toString() shouldEqual "a=b%2Bc" + Query("a" → "b;c").toString() shouldEqual "a=b%3Bc" } } @@ -456,7 +456,7 @@ class UriSpec extends WordSpec with Matchers { "support tunneling a URI through a query param" in { val uri = Uri("http://aHost/aPath?aParam=aValue#aFragment") - val q = Query("uri" -> uri.toString) + val q = Query("uri" → uri.toString) val uri2 = Uri(path = Path./, fragment = Some("aFragment")).withQuery(q).toString uri2 shouldEqual "/?uri=http://ahost/aPath?aParam%3DaValue%23aFragment#aFragment" Uri(uri2).query() shouldEqual q @@ -588,8 +588,8 @@ class UriSpec extends WordSpec with Matchers { uri.withPath(Path("/newpath")) shouldEqual Uri("http://host/newpath?query#fragment") uri.withUserInfo("someInfo") shouldEqual Uri("http://someInfo@host:80/path?query#fragment") - uri.withQuery(Query("param1" -> "value1")) shouldEqual Uri("http://host:80/path?param1=value1#fragment") - uri.withQuery(Query(Map("param1" -> "value1"))) shouldEqual Uri("http://host:80/path?param1=value1#fragment") + uri.withQuery(Query("param1" → "value1")) shouldEqual Uri("http://host:80/path?param1=value1#fragment") + uri.withQuery(Query(Map("param1" → "value1"))) shouldEqual Uri("http://host:80/path?param1=value1#fragment") uri.withRawQueryString("param1=value1") shouldEqual Uri("http://host:80/path?param1=value1#fragment") uri.withFragment("otherFragment") shouldEqual Uri("http://host:80/path?query#otherFragment") diff --git a/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala b/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala index e482e9a..638756b 100644 --- a/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala +++ b/akka-http-core/src/test/scala/io/akka/integrationtest/http/HttpModelIntegrationSpec.scala @@ -79,10 +79,10 @@ class HttpModelIntegrationSpec extends WordSpec with Matchers with BeforeAndAfte } val textHeaders: Seq[(String, String)] = entityTextHeaders ++ partialTextHeaders textHeaders shouldEqual Seq( - "Content-Type" -> "application/json", - "Content-Length" -> "5", - "Host" -> "localhost", - "Origin" -> "null") + "Content-Type" → "application/json", + "Content-Length" → "5", + "Host" → "localhost", + "Origin" → "null") // Finally convert the body into an Array[Byte]. @@ -98,9 +98,9 @@ class HttpModelIntegrationSpec extends WordSpec with Matchers with BeforeAndAfte // example simple model of an HTTP response. val textHeaders: Seq[(String, String)] = Seq( - "Content-Type" -> "text/plain", - "Content-Length" -> "3", - "X-Greeting" -> "Hello") + "Content-Type" → "text/plain", + "Content-Length" → "3", + "X-Greeting" → "Hello") val byteArrayBody: Array[Byte] = "foo".getBytes // Now we need to convert this model to Akka HTTP's model. 
To do that diff --git a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala index 6269a96..5915514 100644 --- a/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala +++ b/akka-http-testkit/src/main/scala/akka/http/scaladsl/testkit/RouteTest.scala @@ -134,11 +134,11 @@ trait RouteTest extends RequestBuilding with WSTestRequestBuilding with RouteTes def apply(request: HttpRequest, f: HttpRequest ⇒ HttpRequest) = f(request) } implicit def injectIntoRoute(implicit timeout: RouteTestTimeout, - defaultHostInfo: DefaultHostInfo, - routingSettings: RoutingSettings, + defaultHostInfo: DefaultHostInfo, + routingSettings: RoutingSettings, executionContext: ExecutionContext, - materializer: Materializer, - routingLog: RoutingLog, + materializer: Materializer, + routingLog: RoutingLog, rejectionHandler: RejectionHandler = RejectionHandler.default, exceptionHandler: ExceptionHandler = null) = new TildeArrow[RequestContext, Future[RouteResult]] { diff --git a/akka-http/src/main/scala/akka/http/impl/server/FormFieldImpl.scala b/akka-http/src/main/scala/akka/http/impl/server/FormFieldImpl.scala index 01ff7d2..f2b1643 100644 --- a/akka-http/src/main/scala/akka/http/impl/server/FormFieldImpl.scala +++ b/akka-http/src/main/scala/akka/http/impl/server/FormFieldImpl.scala @@ -20,7 +20,8 @@ import scala.compat.java8.OptionConverters._ * INTERNAL API */ private[http] class FormFieldImpl[T, U](receptacle: NameReceptacle[T])( - implicit fu: FromStrictFormFieldUnmarshaller[T], tTag: ClassTag[U], conv: T ⇒ U) + implicit + fu: FromStrictFormFieldUnmarshaller[T], tTag: ClassTag[U], conv: T ⇒ U) extends StandaloneExtractionImpl[U] with FormField[U] { import Directives._ diff --git a/akka-http/src/main/scala/akka/http/impl/server/HeaderImpl.scala b/akka-http/src/main/scala/akka/http/impl/server/HeaderImpl.scala index e504359..bdcd8d9 100644 --- a/akka-http/src/main/scala/akka/http/impl/server/HeaderImpl.scala +++ b/akka-http/src/main/scala/akka/http/impl/server/HeaderImpl.scala @@ -22,7 +22,7 @@ import scala.reflect.ClassTag */ private[http] object HeaderImpl { def apply[T <: HttpHeader]( - name: String, + name: String, optionalDirective: ClassTag[T with scaladsl.model.HttpHeader] ⇒ Directive1[Option[T with scaladsl.model.HttpHeader]], tClassTag: ClassTag[T]): Header[T] = { type U = T with scaladsl.model.HttpHeader diff --git a/akka-http/src/main/scala/akka/http/impl/server/ParameterImpl.scala b/akka-http/src/main/scala/akka/http/impl/server/ParameterImpl.scala index 7a2c6b3..2c37596 100644 --- a/akka-http/src/main/scala/akka/http/impl/server/ParameterImpl.scala +++ b/akka-http/src/main/scala/akka/http/impl/server/ParameterImpl.scala @@ -22,7 +22,8 @@ import scala.compat.java8.OptionConverters._ * INTERNAL API */ private[http] class ParameterImpl[T, U](receptacle: NameReceptacle[T])( - implicit fu: FromStringUnmarshaller[T], tTag: ClassTag[U], conv: T ⇒ U) + implicit + fu: FromStringUnmarshaller[T], tTag: ClassTag[U], conv: T ⇒ U) extends StandaloneExtractionImpl[U] with Parameter[U] { import ParameterDirectives._ diff --git a/akka-http/src/main/scala/akka/http/impl/server/RouteImplementation.scala b/akka-http/src/main/scala/akka/http/impl/server/RouteImplementation.scala index 2a8003f..315c172 100644 --- a/akka-http/src/main/scala/akka/http/impl/server/RouteImplementation.scala +++ b/akka-http/src/main/scala/akka/http/impl/server/RouteImplementation.scala @@ -180,9 +180,9 @@ private[http] object 
RouteImplementation extends Directives with server.RouteCon case p: Product ⇒ extractExecutionContext { implicit ec ⇒ complete((500, s"Not implemented: ${p.productPrefix}")) } } } - def pathMatcherDirective[T](matchers: immutable.Seq[PathMatcher[_]], + def pathMatcherDirective[T](matchers: immutable.Seq[PathMatcher[_]], directive: PathMatcher1[T] ⇒ Directive1[T] // this type is too specific and only a placeholder for a proper polymorphic function - ): Directive0 = { + ): Directive0 = { // Concatenating PathMatchers is a bit complicated as we don't want to build up a tuple // but something which we can later split all the separate values and add them to the // ExtractionMap. @@ -197,7 +197,7 @@ private[http] object RouteImplementation extends Directives with server.RouteCon Tuple1(prefix._1 ++ suffix._1) } def toScala(matcher: PathMatcher[_]): ScalaPathMatcher[ValMap] = - matcher.asInstanceOf[PathMatcherImpl[_]].matcher.transform(_.map(v ⇒ Tuple1(Map(matcher -> v._1)))) + matcher.asInstanceOf[PathMatcherImpl[_]].matcher.transform(_.map(v ⇒ Tuple1(Map(matcher → v._1)))) def addExtractions(valMap: T): Directive0 = transformExtractionMap(_.addAll(valMap.asInstanceOf[Map[RequestVal[_], Any]])) val reduced: ScalaPathMatcher[ValMap] = matchers.map(toScala).reduce(_.~(_)(AddToMapJoin)) directive(reduced.asInstanceOf[PathMatcher1[T]]).flatMap(addExtractions) diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala index 3f2316d..99f32ad 100644 --- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala +++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/BasicDirectives.scala @@ -169,7 +169,7 @@ abstract class BasicDirectives extends BasicDirectivesBase { /** Makes sure both RouteResult and Future[RouteResult] are acceptable result types. */ def adaptResult(method: Method): (RequestContext, AnyRef) ⇒ RouteResult = if (returnsFuture(method)) (ctx, v) ⇒ ctx.completeWith(v.asInstanceOf[Future[RouteResult]].toJava) - else if (returnsCompletionStage(method)) (ctx, v) => ctx.completeWith(v.asInstanceOf[CompletionStage[RouteResult]]) + else if (returnsCompletionStage(method)) (ctx, v) ⇒ ctx.completeWith(v.asInstanceOf[CompletionStage[RouteResult]]) else (_, v) ⇒ v.asInstanceOf[RouteResult] val IdentityAdaptor: (RequestContext, Seq[Any]) ⇒ Seq[Any] = (_, ps) ⇒ ps diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/directives/MiscDirectives.scala b/akka-http/src/main/scala/akka/http/javadsl/server/directives/MiscDirectives.scala index b505eb5..bfa740e 100644 --- a/akka-http/src/main/scala/akka/http/javadsl/server/directives/MiscDirectives.scala +++ b/akka-http/src/main/scala/akka/http/javadsl/server/directives/MiscDirectives.scala @@ -36,10 +36,10 @@ abstract class MiscDirectives extends MethodDirectives { * route is rejected with a [[akka.http.scaladsl.server.ValidationRejection]]. 
*/ @varargs - def validate[T1, T2](value1: RequestVal[T1], - value2: RequestVal[T2], - check: Function2[T1, T2, JBoolean], - errorMsg: String, + def validate[T1, T2](value1: RequestVal[T1], + value2: RequestVal[T2], + check: Function2[T1, T2, JBoolean], + errorMsg: String, innerRoute: Route, moreInnerRoutes: Route*): Route = new DynamicDirectiveRoute2[T1, T2](value1, value2)(innerRoute, moreInnerRoutes.toList) { def createDirective(t1: T1, t2: T2): Directive = Directives.custom(Validated(check.apply(t1, t2), errorMsg)) diff --git a/akka-http/src/main/scala/akka/http/javadsl/server/values/PathMatchers.scala b/akka-http/src/main/scala/akka/http/javadsl/server/values/PathMatchers.scala index 1c6cebb..68b1140 100644 --- a/akka-http/src/main/scala/akka/http/javadsl/server/values/PathMatchers.scala +++ b/akka-http/src/main/scala/akka/http/javadsl/server/values/PathMatchers.scala @@ -51,7 +51,7 @@ object PathMatchers { * Creates a PathMatcher that consumes (a prefix of) the first path segment * (if the path begins with a segment) and extracts a given value. */ - def segment(name: String): PathMatcher[String] = matcher(_ ⇒ name -> name) + def segment(name: String): PathMatcher[String] = matcher(_ ⇒ name → name) /** * A PathMatcher that efficiently matches a number of digits and extracts their (non-negative) Int value. diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala index dd90569..d91ecdc 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/Decoder.scala @@ -7,7 +7,7 @@ package akka.http.scaladsl.coding import akka.NotUsed import akka.http.scaladsl.model._ import akka.stream.{ FlowShape, Materializer } -import akka.stream.stage.{ GraphStage} +import akka.stream.stage.{ GraphStage } import akka.util.ByteString import headers.HttpEncoding import akka.stream.scaladsl.{ Sink, Source, Flow } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala index 8613630..59c4dd9 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/Gzip.scala @@ -125,5 +125,5 @@ private[http] object GzipDecompressor { 0, // MTIME 4 0, // XFL 0 // OS - ) + ) } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala b/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala index b485181..6e74ad9 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/coding/NoCoding.scala @@ -7,7 +7,7 @@ package akka.http.scaladsl.coding import akka.http.scaladsl.model._ import akka.http.impl.util.StreamUtils import akka.stream.FlowShape -import akka.stream.stage.{ GraphStage} +import akka.stream.stage.{ GraphStage } import akka.util.ByteString import headers.HttpEncodings diff --git a/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala b/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala index 930da14..5e3ea5b 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/common/StrictForm.scala @@ -93,36 +93,35 @@ object StrictForm { implicit def unmarshaller(implicit formDataUM: FromEntityUnmarshaller[FormData], multipartUM: FromEntityUnmarshaller[Multipart.FormData]): FromEntityUnmarshaller[StrictForm] = - 
Unmarshaller.withMaterializer { implicit ec ⇒ - implicit fm ⇒ - entity ⇒ + Unmarshaller.withMaterializer { implicit ec ⇒ implicit fm ⇒ + entity ⇒ - def tryUnmarshalToQueryForm: Future[StrictForm] = - for (formData ← formDataUM(entity).fast) yield { - new StrictForm { - val fields = formData.fields.map { case (name, value) ⇒ name -> Field.FromString(value) }(collection.breakOut) - } + def tryUnmarshalToQueryForm: Future[StrictForm] = + for (formData ← formDataUM(entity).fast) yield { + new StrictForm { + val fields = formData.fields.map { case (name, value) ⇒ name → Field.FromString(value) }(collection.breakOut) } + } - def tryUnmarshalToMultipartForm: Future[StrictForm] = - for { - multiPartFD ← multipartUM(entity).fast - strictMultiPartFD ← multiPartFD.toStrict(10.seconds).fast // TODO: make timeout configurable - } yield { - new StrictForm { - val fields = strictMultiPartFD.strictParts.map { - case x: Multipart.FormData.BodyPart.Strict ⇒ x.name -> Field.FromPart(x) - }(collection.breakOut) - } + def tryUnmarshalToMultipartForm: Future[StrictForm] = + for { + multiPartFD ← multipartUM(entity).fast + strictMultiPartFD ← multiPartFD.toStrict(10.seconds).fast // TODO: make timeout configurable + } yield { + new StrictForm { + val fields = strictMultiPartFD.strictParts.map { + case x: Multipart.FormData.BodyPart.Strict ⇒ x.name → Field.FromPart(x) + }(collection.breakOut) } - - tryUnmarshalToQueryForm.fast.recoverWith { - case Unmarshaller.UnsupportedContentTypeException(supported1) ⇒ - tryUnmarshalToMultipartForm.fast.recoverWith { - case Unmarshaller.UnsupportedContentTypeException(supported2) ⇒ - FastFuture.failed(Unmarshaller.UnsupportedContentTypeException(supported1 ++ supported2)) - } } + + tryUnmarshalToQueryForm.fast.recoverWith { + case Unmarshaller.UnsupportedContentTypeException(supported1) ⇒ + tryUnmarshalToMultipartForm.fast.recoverWith { + case Unmarshaller.UnsupportedContentTypeException(supported2) ⇒ + FastFuture.failed(Unmarshaller.UnsupportedContentTypeException(supported1 ++ supported2)) + } + } } /** diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala index 4c78a9a..b7b74df 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/ContentTypeOverrider.scala @@ -21,7 +21,7 @@ object ContentTypeOverrider { implicit def forHeadersAndEntity[T <: HttpEntity]: ContentTypeOverrider[(immutable.Seq[HttpHeader], T)] = new ContentTypeOverrider[(immutable.Seq[HttpHeader], T)] { def apply(value: (immutable.Seq[HttpHeader], T), newContentType: ContentType) = - value._1 -> value._2.withContentType(newContentType).asInstanceOf[T] + value._1 → value._2.withContentType(newContentType).asInstanceOf[T] } implicit val forResponse: ContentTypeOverrider[HttpResponse] = diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala index d10d993..28dd4fc 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/EmptyValue.scala @@ -14,7 +14,7 @@ object EmptyValue { new EmptyValue[UniversalEntity](HttpEntity.Empty) implicit val emptyHeadersAndEntity: EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)] = - new EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)](Nil -> 
HttpEntity.Empty) + new EmptyValue[(immutable.Seq[HttpHeader], UniversalEntity)](Nil → HttpEntity.Empty) implicit val emptyResponse: EmptyValue[HttpResponse] = new EmptyValue[HttpResponse](HttpResponse(entity = emptyEntity.emptyValue)) diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala index 45fa6ab..bf03201 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/Marshaller.scala @@ -35,36 +35,35 @@ sealed abstract class Marshaller[-A, +B] { * If the wrapping is illegal the [[Future]] produced by the resulting marshaller will contain a [[RuntimeException]]. */ def wrapWithEC[C, D >: B](newMediaType: MediaType)(f: ExecutionContext ⇒ C ⇒ A)(implicit cto: ContentTypeOverrider[D]): Marshaller[C, D] = - Marshaller { implicit ec ⇒ - value ⇒ - import Marshalling._ - this(f(ec)(value)).fast map { - _ map { - (_, newMediaType) match { - case (WithFixedContentType(_, marshal), newMT: MediaType.Binary) ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) - case (WithFixedContentType(oldCT: ContentType.Binary, marshal), newMT: MediaType.WithFixedCharset) ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) - case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithFixedCharset) if oldCT.charset == newMT.charset ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) - case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithOpenCharset) ⇒ - val newCT = newMT withCharset oldCT.charset - WithFixedContentType(newCT, () ⇒ cto(marshal(), newCT)) - - case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithOpenCharset) ⇒ - WithOpenCharset(newMT, cs ⇒ cto(marshal(cs), newMT withCharset cs)) - case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithFixedCharset) ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(newMT.charset), newMT)) - - case (Opaque(marshal), newMT: MediaType.Binary) ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) - case (Opaque(marshal), newMT: MediaType.WithFixedCharset) ⇒ - WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) - - case x ⇒ sys.error(s"Illegal marshaller wrapping. 
Marshalling `$x` cannot be wrapped with MediaType `$newMediaType`") - } + Marshaller { implicit ec ⇒ value ⇒ + import Marshalling._ + this(f(ec)(value)).fast map { + _ map { + (_, newMediaType) match { + case (WithFixedContentType(_, marshal), newMT: MediaType.Binary) ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) + case (WithFixedContentType(oldCT: ContentType.Binary, marshal), newMT: MediaType.WithFixedCharset) ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) + case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithFixedCharset) if oldCT.charset == newMT.charset ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) + case (WithFixedContentType(oldCT: ContentType.NonBinary, marshal), newMT: MediaType.WithOpenCharset) ⇒ + val newCT = newMT withCharset oldCT.charset + WithFixedContentType(newCT, () ⇒ cto(marshal(), newCT)) + + case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithOpenCharset) ⇒ + WithOpenCharset(newMT, cs ⇒ cto(marshal(cs), newMT withCharset cs)) + case (WithOpenCharset(oldMT, marshal), newMT: MediaType.WithFixedCharset) ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(newMT.charset), newMT)) + + case (Opaque(marshal), newMT: MediaType.Binary) ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) + case (Opaque(marshal), newMT: MediaType.WithFixedCharset) ⇒ + WithFixedContentType(newMT, () ⇒ cto(marshal(), newMT)) + + case x ⇒ sys.error(s"Illegal marshaller wrapping. Marshalling `$x` cannot be wrapped with MediaType `$newMediaType`") } } + } } def compose[C](f: C ⇒ A): Marshaller[C, B] = @@ -152,7 +151,7 @@ object Marshalling { * A Marshalling to a specific [[ContentType]]. */ final case class WithFixedContentType[A](contentType: ContentType, - marshal: () ⇒ A) extends Marshalling[A] { + marshal: () ⇒ A) extends Marshalling[A] { def map[B](f: A ⇒ B): WithFixedContentType[B] = copy(marshal = () ⇒ f(marshal())) } @@ -160,7 +159,7 @@ object Marshalling { * A Marshalling to a specific [[MediaType]] with a flexible charset. 
*/ final case class WithOpenCharset[A](mediaType: MediaType.WithOpenCharset, - marshal: HttpCharset ⇒ A) extends Marshalling[A] { + marshal: HttpCharset ⇒ A) extends Marshalling[A] { def map[B](f: A ⇒ B): WithOpenCharset[B] = copy(marshal = cs ⇒ f(marshal(cs))) } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala index fa612d8..412efc0 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/marshalling/PredefinedToResponseMarshallers.scala @@ -15,9 +15,10 @@ trait PredefinedToResponseMarshallers extends LowPriorityToResponseMarshallerImp private type TRM[T] = ToResponseMarshaller[T] // brevity alias - def fromToEntityMarshaller[T](status: StatusCode = StatusCodes.OK, + def fromToEntityMarshaller[T](status: StatusCode = StatusCodes.OK, headers: immutable.Seq[HttpHeader] = Nil)( - implicit m: ToEntityMarshaller[T]): ToResponseMarshaller[T] = + implicit + m: ToEntityMarshaller[T]): ToResponseMarshaller[T] = fromStatusCodeAndHeadersAndValue compose (t ⇒ (status, headers, t)) implicit val fromResponse: TRM[HttpResponse] = Marshaller.opaque(ConstantFun.scalaIdentityFunction) diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala index dd8de09..ff23f60 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Directive.scala @@ -44,12 +44,11 @@ abstract class Directive[L](implicit val ev: Tuple[L]) { def as[A](constructor: ConstructFromTuple[L, A]): Directive1[A] = { def validatedMap[R](f: L ⇒ R)(implicit tupler: Tupler[R]): Directive[tupler.Out] = Directive[tupler.Out] { inner ⇒ - tapply { values ⇒ - ctx ⇒ - try inner(tupler(f(values)))(ctx) - catch { - case e: IllegalArgumentException ⇒ ctx.reject(ValidationRejection(e.getMessage.nullAsEmpty, Some(e))) - } + tapply { values ⇒ ctx ⇒ + try inner(tupler(f(values)))(ctx) + catch { + case e: IllegalArgumentException ⇒ ctx.reject(ValidationRejection(e.getMessage.nullAsEmpty, Some(e))) + } } }(tupler.OutIsTuple) @@ -88,14 +87,13 @@ abstract class Directive[L](implicit val ev: Tuple[L]) { * **before the inner route was applied**. 
*/ def recover[R >: L: Tuple](recovery: immutable.Seq[Rejection] ⇒ Directive[R]): Directive[R] = - Directive[R] { inner ⇒ - ctx ⇒ - import ctx.executionContext - @volatile var rejectedFromInnerRoute = false - tapply({ list ⇒ c ⇒ rejectedFromInnerRoute = true; inner(list)(c) })(ctx).fast.flatMap { - case RouteResult.Rejected(rejections) if !rejectedFromInnerRoute ⇒ recovery(rejections).tapply(inner)(ctx) - case x ⇒ FastFuture.successful(x) - } + Directive[R] { inner ⇒ ctx ⇒ + import ctx.executionContext + @volatile var rejectedFromInnerRoute = false + tapply({ list ⇒ c ⇒ rejectedFromInnerRoute = true; inner(list)(c) })(ctx).fast.flatMap { + case RouteResult.Rejected(rejections) if !rejectedFromInnerRoute ⇒ recovery(rejections).tapply(inner)(ctx) + case x ⇒ FastFuture.successful(x) + } } /** diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala index 0ba2b4f..d197245 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Rejection.scala @@ -129,7 +129,7 @@ object UnacceptedResponseEncodingRejection { * Signals that the request was rejected because the user could not be authenticated. The reason for the rejection is * specified in the cause. */ -case class AuthenticationFailedRejection(cause: AuthenticationFailedRejection.Cause, +case class AuthenticationFailedRejection(cause: AuthenticationFailedRejection.Cause, challenge: HttpChallenge) extends Rejection object AuthenticationFailedRejection { diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala index cb2e4df..470c91a 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RejectionHandler.scala @@ -88,8 +88,8 @@ object RejectionHandler { def apply(rejection: Rejection) = rejection.asInstanceOf[T] } - private class BuiltRejectionHandler(val cases: Vector[Handler], - val notFound: Option[Route], + private class BuiltRejectionHandler(val cases: Vector[Handler], + val notFound: Option[Route], val isDefault: Boolean) extends RejectionHandler { def apply(rejections: immutable.Seq[Rejection]): Option[Route] = if (rejections.nonEmpty) { @@ -120,7 +120,7 @@ object RejectionHandler { complete((BadRequest, "Uri scheme not allowed, supported schemes: " + schemes)) } .handleAll[MethodRejection] { rejections ⇒ - val (methods, names) = rejections.map(r ⇒ r.supported -> r.supported.name).unzip + val (methods, names) = rejections.map(r ⇒ r.supported → r.supported.name).unzip complete((MethodNotAllowed, List(Allow(methods)), "HTTP method not allowed, supported methods: " + names.mkString(", "))) } .handle { diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala index a8b2236..24a41ee 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContext.scala @@ -53,9 +53,9 @@ trait RequestContext { */ def reconfigure( executionContext: ExecutionContextExecutor = executionContext, - materializer: Materializer = materializer, - log: LoggingAdapter = log, - settings: RoutingSettings = settings): RequestContext + materializer: Materializer = materializer, + log: LoggingAdapter = log, + settings: 
RoutingSettings = settings): RequestContext /** * Completes the request with the given ToResponseMarshallable. diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala index dfe96a8..d474ff4 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RequestContextImpl.scala @@ -17,13 +17,13 @@ import akka.http.scaladsl.util.FastFuture._ * INTERNAL API */ private[http] class RequestContextImpl( - val request: HttpRequest, - val unmatchedPath: Uri.Path, + val request: HttpRequest, + val unmatchedPath: Uri.Path, val executionContext: ExecutionContextExecutor, - val materializer: Materializer, - val log: LoggingAdapter, - val settings: RoutingSettings, - val parserSettings: ParserSettings) extends RequestContext { + val materializer: Materializer, + val log: LoggingAdapter, + val settings: RoutingSettings, + val parserSettings: ParserSettings) extends RequestContext { def this(request: HttpRequest, log: LoggingAdapter, settings: RoutingSettings, parserSettings: ParserSettings)(implicit ec: ExecutionContextExecutor, materializer: Materializer) = this(request, request.uri.path, ec, materializer, log, settings, parserSettings) @@ -89,12 +89,12 @@ private[http] class RequestContextImpl( case _ ⇒ this } - private def copy(request: HttpRequest = request, - unmatchedPath: Uri.Path = unmatchedPath, + private def copy(request: HttpRequest = request, + unmatchedPath: Uri.Path = unmatchedPath, executionContext: ExecutionContextExecutor = executionContext, - materializer: Materializer = materializer, - log: LoggingAdapter = log, - routingSettings: RoutingSettings = settings, - parserSettings: ParserSettings = parserSettings) = + materializer: Materializer = materializer, + log: LoggingAdapter = log, + routingSettings: RoutingSettings = settings, + parserSettings: ParserSettings = parserSettings) = new RequestContextImpl(request, unmatchedPath, executionContext, materializer, log, routingSettings, parserSettings) } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala index 246d303..b1e9d38 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/Route.scala @@ -24,7 +24,7 @@ object Route { * "Seals" a route by wrapping it with exception handling and rejection conversion. */ def seal(route: Route)(implicit routingSettings: RoutingSettings, - parserSettings: ParserSettings = null, + parserSettings: ParserSettings = null, rejectionHandler: RejectionHandler = RejectionHandler.default, exceptionHandler: ExceptionHandler = null): Route = { import directives.ExecutionDirectives._ @@ -41,24 +41,24 @@ object Route { * This conversion is also implicitly available through [[RouteResult.route2HandlerFlow]]. 
*/ def handlerFlow(route: Route)(implicit routingSettings: RoutingSettings, - parserSettings: ParserSettings, - materializer: Materializer, - routingLog: RoutingLog, + parserSettings: ParserSettings, + materializer: Materializer, + routingLog: RoutingLog, executionContext: ExecutionContextExecutor = null, - rejectionHandler: RejectionHandler = RejectionHandler.default, - exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] = + rejectionHandler: RejectionHandler = RejectionHandler.default, + exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].mapAsync(1)(asyncHandler(route)) /** * Turns a `Route` into an async handler function. */ def asyncHandler(route: Route)(implicit routingSettings: RoutingSettings, - parserSettings: ParserSettings, - materializer: Materializer, - routingLog: RoutingLog, + parserSettings: ParserSettings, + materializer: Materializer, + routingLog: RoutingLog, executionContext: ExecutionContextExecutor = null, - rejectionHandler: RejectionHandler = RejectionHandler.default, - exceptionHandler: ExceptionHandler = null): HttpRequest ⇒ Future[HttpResponse] = { + rejectionHandler: RejectionHandler = RejectionHandler.default, + exceptionHandler: ExceptionHandler = null): HttpRequest ⇒ Future[HttpResponse] = { val effectiveEC = if (executionContext ne null) executionContext else materializer.executionContext { diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala index d871e06..fa0c911 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/RouteResult.scala @@ -25,9 +25,9 @@ object RouteResult { final case class Rejected(rejections: immutable.Seq[Rejection]) extends RouteResult implicit def route2HandlerFlow(route: Route)(implicit routingSettings: RoutingSettings, - parserSettings: ParserSettings, - materializer: Materializer, - routingLog: RoutingLog, + parserSettings: ParserSettings, + materializer: Materializer, + routingLog: RoutingLog, executionContext: ExecutionContext = null, rejectionHandler: RejectionHandler = RejectionHandler.default, exceptionHandler: ExceptionHandler = null): Flow[HttpRequest, HttpResponse, NotUsed] = diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala index c878ffe..bca9dd1 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/DebuggingDirectives.scala @@ -51,7 +51,7 @@ case class LoggingMagnet[T](f: LoggingAdapter ⇒ T) // # logging-magnet object LoggingMagnet { implicit def forMessageFromMarker[T](marker: String): LoggingMagnet[T ⇒ Unit] = // # message-magnets - forMessageFromMarkerAndLevel[T](marker -> DebugLevel) + forMessageFromMarkerAndLevel[T](marker → DebugLevel) implicit def forMessageFromMarkerAndLevel[T](markerAndLevel: (String, LogLevel)): LoggingMagnet[T ⇒ Unit] = // # message-magnets forMessageFromFullShow[T] { @@ -66,7 +66,7 @@ object LoggingMagnet { LoggingMagnet(log ⇒ show(_).logTo(log)) implicit def forRequestResponseFromMarker(marker: String): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets - forRequestResponseFromMarkerAndLevel(marker -> DebugLevel) + 
forRequestResponseFromMarkerAndLevel(marker → DebugLevel) implicit def forRequestResponseFromMarkerAndLevel(markerAndLevel: (String, LogLevel)): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets forRequestResponseFromFullShow { @@ -76,10 +76,9 @@ object LoggingMagnet { } implicit def forRequestResponseFromFullShow(show: HttpRequest ⇒ RouteResult ⇒ Option[LogEntry]): LoggingMagnet[HttpRequest ⇒ RouteResult ⇒ Unit] = // # request-response-magnets - LoggingMagnet { log ⇒ - request ⇒ - val showResult = show(request) - result ⇒ showResult(result).foreach(_.logTo(log)) + LoggingMagnet { log ⇒ request ⇒ + val showResult = show(request) + result ⇒ showResult(result).foreach(_.logTo(log)) } } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala index 009a4a6..e084f28 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/ExecutionDirectives.scala @@ -19,15 +19,14 @@ trait ExecutionDirectives { * [[akka.http.scaladsl.server.ExceptionHandler]]. */ def handleExceptions(handler: ExceptionHandler): Directive0 = - Directive { innerRouteBuilder ⇒ - ctx ⇒ - import ctx.executionContext - def handleException: PartialFunction[Throwable, Future[RouteResult]] = - handler andThen (_(ctx.withAcceptAll)) - try innerRouteBuilder(())(ctx).fast.recoverWith(handleException) - catch { - case NonFatal(e) ⇒ handleException.applyOrElse[Throwable, Future[RouteResult]](e, throw _) - } + Directive { innerRouteBuilder ⇒ ctx ⇒ + import ctx.executionContext + def handleException: PartialFunction[Throwable, Future[RouteResult]] = + handler andThen (_(ctx.withAcceptAll)) + try innerRouteBuilder(())(ctx).fast.recoverWith(handleException) + catch { + case NonFatal(e) ⇒ handleException.applyOrElse[Throwable, Future[RouteResult]](e, throw _) + } } /** diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala index 9da99b9..541cace 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FileAndResourceDirectives.scala @@ -308,39 +308,38 @@ object DirectoryListing { |""".stripMarginWithNewline("\n") split '$' def directoryMarshaller(renderVanityFooter: Boolean): ToEntityMarshaller[DirectoryListing] = - Marshaller.StringMarshaller.wrapWithEC(MediaTypes.`text/html`) { implicit ec ⇒ - listing ⇒ - val DirectoryListing(path, isRoot, files) = listing - val filesAndNames = files.map(file ⇒ file -> file.getName).sortBy(_._2) - val deduped = filesAndNames.zipWithIndex.flatMap { - case (fan @ (file, name), ix) ⇒ - if (ix == 0 || filesAndNames(ix - 1)._2 != name) Some(fan) else None - } - val (directoryFilesAndNames, fileFilesAndNames) = deduped.partition(_._1.isDirectory) - def maxNameLength(seq: Seq[(File, String)]) = if (seq.isEmpty) 0 else seq.map(_._2.length).max - val maxNameLen = math.max(maxNameLength(directoryFilesAndNames) + 1, maxNameLength(fileFilesAndNames)) - val sb = new java.lang.StringBuilder - sb.append(html(0)).append(path).append(html(1)).append(path).append(html(2)) - if (!isRoot) { - val secondToLastSlash = path.lastIndexOf('/', path.lastIndexOf('/', path.length - 1) - 1) - 
sb.append("../\n" format path.substring(0, secondToLastSlash)) - } - def lastModified(file: File) = DateTime(file.lastModified).toIsoLikeDateTimeString - def start(name: String) = - sb.append("").append(name).append("") - .append(" " * (maxNameLen - name.length)) - def renderDirectory(file: File, name: String) = - start(name + '/').append(" ").append(lastModified(file)).append('\n') - def renderFile(file: File, name: String) = { - val size = akka.http.impl.util.humanReadableByteCount(file.length, si = true) - start(name).append(" ").append(lastModified(file)) - sb.append(" ".substring(size.length)).append(size).append('\n') - } - for ((file, name) ← directoryFilesAndNames) renderDirectory(file, name) - for ((file, name) ← fileFilesAndNames) renderFile(file, name) - if (isRoot && files.isEmpty) sb.append("(no files)\n") - sb.append(html(3)) - if (renderVanityFooter) sb.append(html(4)).append(DateTime.now.toIsoLikeDateTimeString).append(html(5)) - sb.append(html(6)).toString + Marshaller.StringMarshaller.wrapWithEC(MediaTypes.`text/html`) { implicit ec ⇒ listing ⇒ + val DirectoryListing(path, isRoot, files) = listing + val filesAndNames = files.map(file ⇒ file → file.getName).sortBy(_._2) + val deduped = filesAndNames.zipWithIndex.flatMap { + case (fan @ (file, name), ix) ⇒ + if (ix == 0 || filesAndNames(ix - 1)._2 != name) Some(fan) else None + } + val (directoryFilesAndNames, fileFilesAndNames) = deduped.partition(_._1.isDirectory) + def maxNameLength(seq: Seq[(File, String)]) = if (seq.isEmpty) 0 else seq.map(_._2.length).max + val maxNameLen = math.max(maxNameLength(directoryFilesAndNames) + 1, maxNameLength(fileFilesAndNames)) + val sb = new java.lang.StringBuilder + sb.append(html(0)).append(path).append(html(1)).append(path).append(html(2)) + if (!isRoot) { + val secondToLastSlash = path.lastIndexOf('/', path.lastIndexOf('/', path.length - 1) - 1) + sb.append("../\n" format path.substring(0, secondToLastSlash)) + } + def lastModified(file: File) = DateTime(file.lastModified).toIsoLikeDateTimeString + def start(name: String) = + sb.append("").append(name).append("") + .append(" " * (maxNameLen - name.length)) + def renderDirectory(file: File, name: String) = + start(name + '/').append(" ").append(lastModified(file)).append('\n') + def renderFile(file: File, name: String) = { + val size = akka.http.impl.util.humanReadableByteCount(file.length, si = true) + start(name).append(" ").append(lastModified(file)) + sb.append(" ".substring(size.length)).append(size).append('\n') + } + for ((file, name) ← directoryFilesAndNames) renderDirectory(file, name) + for ((file, name) ← fileFilesAndNames) renderFile(file, name) + if (isRoot && files.isEmpty) sb.append("(no files)\n") + sb.append(html(3)) + if (renderVanityFooter) sb.append(html(4)).append(DateTime.now.toIsoLikeDateTimeString).append(html(5)) + sb.append(html(6)).toString } } diff --git a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala index ed5b6eb..96a20ed 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/server/directives/FormFieldDirectives.scala @@ -85,7 +85,7 @@ object FormFieldDirectives extends FormFieldDirectives { private val _formFieldMultiMap: Directive1[Map[String, List[String]]] = { @tailrec def append( - map: Map[String, List[String]], + map: Map[String, List[String]], fields: 
immutable.Seq[(String, String)]): Map[String, List[String]] = { if (fields.isEmpty) { map diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala index f1b0623..42f3d9d 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/MultipartUnmarshallers.scala @@ -10,7 +10,7 @@ import scala.collection.immutable import scala.collection.immutable.VectorBuilder import akka.util.ByteString import akka.event.{ NoLogging, LoggingAdapter } -import akka.stream.{ActorMaterializer, OverflowStrategy} +import akka.stream.{ ActorMaterializer, OverflowStrategy } import akka.stream.impl.fusing.IteratorInterpreter import akka.stream.scaladsl._ import akka.http.impl.engine.parsing.BodyPartParser @@ -61,13 +61,13 @@ trait MultipartUnmarshallers { createStrict = (_, parts) ⇒ Multipart.ByteRanges.Strict(parts)) def multipartUnmarshaller[T <: Multipart, BP <: Multipart.BodyPart, BPS <: Multipart.BodyPart.Strict]( - mediaRange: MediaRange, - defaultContentType: ContentType, - createBodyPart: (BodyPartEntity, List[HttpHeader]) ⇒ BP, - createStreamed: (MediaType.Multipart, Source[BP, Any]) ⇒ T, + mediaRange: MediaRange, + defaultContentType: ContentType, + createBodyPart: (BodyPartEntity, List[HttpHeader]) ⇒ BP, + createStreamed: (MediaType.Multipart, Source[BP, Any]) ⇒ T, createStrictBodyPart: (HttpEntity.Strict, List[HttpHeader]) ⇒ BPS, - createStrict: (MediaType.Multipart, immutable.Seq[BPS]) ⇒ T)(implicit log: LoggingAdapter = NoLogging, parserSettings: ParserSettings = null): FromEntityUnmarshaller[T] = - Unmarshaller.withMaterializer { implicit ec ⇒ mat => + createStrict: (MediaType.Multipart, immutable.Seq[BPS]) ⇒ T)(implicit log: LoggingAdapter = NoLogging, parserSettings: ParserSettings = null): FromEntityUnmarshaller[T] = + Unmarshaller.withMaterializer { implicit ec ⇒ mat ⇒ entity ⇒ if (entity.contentType.mediaType.isMultipart && mediaRange.matches(entity.contentType.mediaType)) { entity.contentType.mediaType.params.get("boundary") match { diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala index 44b6043..32f81d5 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/PredefinedFromStringUnmarshallers.scala @@ -40,9 +40,8 @@ trait PredefinedFromStringUnmarshallers { implicit def CsvSeq[T](implicit unmarshaller: Unmarshaller[String, T]): Unmarshaller[String, immutable.Seq[T]] = Unmarshaller.strict[String, immutable.Seq[String]] { string ⇒ string.split(",").toList - } flatMap { implicit ec ⇒ - implicit mat ⇒ strings ⇒ - FastFuture.sequence(strings.map(unmarshaller(_))) + } flatMap { implicit ec ⇒ implicit mat ⇒ strings ⇒ + FastFuture.sequence(strings.map(unmarshaller(_))) } val HexByte: Unmarshaller[String, Byte] = diff --git a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala index a8a7098..18ff303 100644 --- a/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala +++ b/akka-http/src/main/scala/akka/http/scaladsl/unmarshalling/Unmarshaller.scala @@ -101,17 +101,16 @@ object
Unmarshaller * an IllegalStateException will be thrown! */ def forContentTypes(ranges: ContentTypeRange*): FromEntityUnmarshaller[A] = - Unmarshaller.withMaterializer { implicit ec ⇒ - implicit mat ⇒ - entity ⇒ - if (entity.contentType == ContentTypes.NoContentType || ranges.exists(_ matches entity.contentType)) { - underlying(entity).fast.recover[A](barkAtUnsupportedContentTypeException(ranges, entity.contentType)) - } else FastFuture.failed(UnsupportedContentTypeException(ranges: _*)) + Unmarshaller.withMaterializer { implicit ec ⇒ implicit mat ⇒ + entity ⇒ + if (entity.contentType == ContentTypes.NoContentType || ranges.exists(_ matches entity.contentType)) { + underlying(entity).fast.recover[A](barkAtUnsupportedContentTypeException(ranges, entity.contentType)) + } else FastFuture.failed(UnsupportedContentTypeException(ranges: _*)) } // TODO: move back into the [[EnhancedFromEntityUnmarshaller]] value class after the upgrade to Scala 2.11, // Scala 2.10 suffers from this bug: https://issues.scala-lang.org/browse/SI-8018 - private def barkAtUnsupportedContentTypeException(ranges: Seq[ContentTypeRange], + private def barkAtUnsupportedContentTypeException(ranges: Seq[ContentTypeRange], newContentType: ContentType): PartialFunction[Throwable, Nothing] = { case UnsupportedContentTypeException(supported) ⇒ throw new IllegalStateException( s"Illegal use of `unmarshaller.forContentTypes($ranges)`: $newContentType is not supported by underlying marshaller!") diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala index 7aa6c66..160b2fb 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -431,7 +431,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP } fsm ! ToClient(BarrierResult("initial startup", false)) } else { - nodes += name -> c + nodes += name → c if (initialParticipants <= 0) fsm ! ToClient(Done) else if (nodes.size == initialParticipants) { for (NodeInfo(_, _, client) ← nodes.values) client ! ToClient(Done) @@ -451,7 +451,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP case _: FailBarrier ⇒ barrier forward op case GetAddress(node) ⇒ if (nodes contains node) sender() ! ToClient(AddressReply(node, nodes(node).addr)) - else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender()) + else addrInterest += node → ((addrInterest get node getOrElse Set()) + sender()) case _: Done ⇒ //FIXME what should happen? 
} case op: CommandOp ⇒ @@ -567,8 +567,8 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } onTransition { - case Idle -> Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline.timeLeft, false) - case Waiting -> Idle ⇒ cancelTimer("Timeout") + case Idle → Waiting ⇒ setTimer("Timeout", StateTimeout, nextStateData.deadline.timeLeft, false) + case Waiting → Idle ⇒ cancelTimer("Timeout") } when(Waiting) { diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala index d698c67..d2ef445 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala @@ -192,8 +192,8 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒ channel.write(msg) val token = msg match { - case EnterBarrier(barrier, timeout) ⇒ Some(barrier -> sender()) - case GetAddress(node) ⇒ Some(node.name -> sender()) + case EnterBarrier(barrier, timeout) ⇒ Some(barrier → sender()) + case GetAddress(node) ⇒ Some(node.name → sender()) case _ ⇒ None } stay using d.copy(runningOp = token) @@ -274,13 +274,13 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) * INTERNAL API. */ private[akka] class PlayerHandler( - server: InetSocketAddress, + server: InetSocketAddress, private var reconnects: Int, - backoff: FiniteDuration, - poolSize: Int, - fsm: ActorRef, - log: LoggingAdapter, - scheduler: Scheduler)(implicit executor: ExecutionContext) + backoff: FiniteDuration, + poolSize: Int, + fsm: ActorRef, + log: LoggingAdapter, + scheduler: Scheduler)(implicit executor: ExecutionContext) extends SimpleChannelUpstreamHandler { import ClientFSM._ diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala index aae55a1..7bf179e 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -41,7 +41,7 @@ abstract class MultiNodeConfig { */ def nodeConfig(roles: RoleName*)(configs: Config*): Unit = { val c = configs.reduceLeft(_ withFallback _) - _nodeConf ++= roles map { _ -> c } + _nodeConf ++= roles map { _ → c } } /** @@ -78,7 +78,7 @@ abstract class MultiNodeConfig { } def deployOn(role: RoleName, deployment: String): Unit = - _deployments += role -> ((_deployments get role getOrElse Vector()) :+ deployment) + _deployments += role → ((_deployments get role getOrElse Vector()) :+ deployment) def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment @@ -195,9 +195,9 @@ object MultiNodeSpec { require(selfIndex >= 0 && selfIndex < maxNodes, "multinode.index is out of bounds: " + selfIndex) private[testkit] val nodeConfig = mapToConfig(Map( - "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider", - "akka.remote.netty.tcp.hostname" -> selfName, - "akka.remote.netty.tcp.port" -> selfPort)) + "akka.actor.provider" → "akka.remote.RemoteActorRefProvider", + "akka.remote.netty.tcp.hostname" → selfName, + "akka.remote.netty.tcp.port" → selfPort)) private[testkit] val baseConfig: Config = ConfigFactory.parseString(""" akka { diff --git a/akka-parsing/src/main/scala/akka/parboiled2/CharUtils.scala 
b/akka-parsing/src/main/scala/akka/parboiled2/CharUtils.scala index 36bba80..5803c18 100644 --- a/akka-parsing/src/main/scala/akka/parboiled2/CharUtils.scala +++ b/akka-parsing/src/main/scala/akka/parboiled2/CharUtils.scala @@ -145,7 +145,7 @@ object CharUtils { phase1(math.abs(long), endIndex) - // for large numbers we bite the bullet of performing one division every two digits + // for large numbers we bite the bullet of performing one division every two digits @tailrec def phase1(l: Long, ix: Int): Unit = if (l > 65535L) { val q = l / 100 diff --git a/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala b/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala index 5b454c7..a968dd7 100644 --- a/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala +++ b/akka-parsing/src/main/scala/akka/parboiled2/ErrorFormatter.scala @@ -34,13 +34,13 @@ import java.lang.{ StringBuilder ⇒ JStringBuilder } * Set to a value < 0 to disable tab expansion. * @param traceCutOff the maximum number of (trailing) characters shown for a rule trace */ -class ErrorFormatter(showExpected: Boolean = true, - showPosition: Boolean = true, - showLine: Boolean = true, - showTraces: Boolean = false, +class ErrorFormatter(showExpected: Boolean = true, + showPosition: Boolean = true, + showLine: Boolean = true, + showTraces: Boolean = false, showFrameStartOffset: Boolean = true, - expandTabs: Int = -1, - traceCutOff: Int = 120) { + expandTabs: Int = -1, + traceCutOff: Int = 120) { /** * Formats the given [[ParseError]] into a String using the settings configured for this formatter instance. @@ -177,8 +177,8 @@ class ErrorFormatter(showExpected: Boolean = true, } rec(inCol + 1, ec) } else errorCol + 1 - if (expandTabs >= 0) rec(0, 0) -> sb.toString() - else errorColumn -> line + if (expandTabs >= 0) rec(0, 0) → sb.toString() + else errorColumn → line } /** @@ -225,8 +225,8 @@ class ErrorFormatter(showExpected: Boolean = true, /** * Formats the head element of a [[RuleTrace]] into a String. 
*/ - def formatNonTerminal(nonTerminal: RuleTrace.NonTerminal, - showFrameStartOffset: Boolean = showFrameStartOffset): String = { + def formatNonTerminal(nonTerminal: RuleTrace.NonTerminal, + showFrameStartOffset: Boolean = showFrameStartOffset): String = { import RuleTrace._ import CharUtils.escape val keyString = nonTerminal.key match { diff --git a/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala b/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala index 7c62c2f..f349242 100644 --- a/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala +++ b/akka-parsing/src/main/scala/akka/parboiled2/ParseError.scala @@ -19,9 +19,9 @@ package akka.parboiled2 import scala.annotation.tailrec import scala.collection.immutable -case class ParseError(position: Position, +case class ParseError(position: Position, principalPosition: Position, - traces: immutable.Seq[RuleTrace]) extends RuntimeException { + traces: immutable.Seq[RuleTrace]) extends RuntimeException { require(principalPosition.index >= position.index, "principalPosition must be > position") def format(parser: Parser): String = format(parser.input) def format(parser: Parser, formatter: ErrorFormatter): String = format(parser.input, formatter) diff --git a/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala b/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala index 5471a30..7b2b46e 100644 --- a/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala +++ b/akka-parsing/src/main/scala/akka/parboiled2/Parser.scala @@ -24,7 +24,7 @@ import akka.shapeless._ import akka.parboiled2.support._ abstract class Parser(initialValueStackSize: Int = 16, - maxValueStackSize: Int = 1024) extends RuleDSL { + maxValueStackSize: Int = 1024) extends RuleDSL { import Parser._ require(maxValueStackSize <= 65536, "`maxValueStackSize` > 2^16 is not supported") // due to current snapshot design @@ -176,7 +176,7 @@ abstract class Parser(initialValueStackSize: Int = 16, @tailrec def phase4_collectRuleTraces(reportedErrorIndex: Int, principalErrorIndex: Int, reportQuiet: Boolean)( - phase3: CollectingRuleTraces = new CollectingRuleTraces(reportedErrorIndex, reportQuiet), + phase3: CollectingRuleTraces = new CollectingRuleTraces(reportedErrorIndex, reportQuiet), traces: VectorBuilder[RuleTrace] = new VectorBuilder): ParseError = { def done = { @@ -592,8 +592,8 @@ object Parser { // or -1 if no atomic rule fails with a mismatch at the principal error index private class EstablishingReportedErrorIndex( private var _principalErrorIndex: Int, - var currentAtomicStart: Int = Int.MinValue, - var maxAtomicErrorStart: Int = Int.MinValue) extends ErrorAnalysisPhase { + var currentAtomicStart: Int = Int.MinValue, + var maxAtomicErrorStart: Int = Int.MinValue) extends ErrorAnalysisPhase { def reportedErrorIndex = if (maxAtomicErrorStart >= 0) maxAtomicErrorStart else _principalErrorIndex def applyOffset(offset: Int) = { _principalErrorIndex -= offset @@ -606,8 +606,8 @@ object Parser { // in which case we need to report them even though they are marked as "quiet" private class DetermineReportQuiet( private var _minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException - var inQuiet: Boolean = false // are we currently in a quiet rule? - ) extends ErrorAnalysisPhase { + var inQuiet: Boolean = false // are we currently in a quiet rule? 
+ ) extends ErrorAnalysisPhase { def minErrorIndex = _minErrorIndex def applyOffset(offset: Int) = _minErrorIndex -= offset } @@ -615,11 +615,11 @@ object Parser { // collect the traces of all mismatches happening at an index >= minErrorIndex (the reported error index) // by throwing a StartTracingException which gets turned into a TracingBubbleException by the terminal rule private class CollectingRuleTraces( - var minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException - val reportQuiet: Boolean, // do we need to trace mismatches from quiet rules? - val traceNr: Int = 0, // the zero-based index number of the RuleTrace we are currently building - var errorMismatches: Int = 0 // the number of times we have already seen a mismatch at >= minErrorIndex - ) extends ErrorAnalysisPhase { + var minErrorIndex: Int, // the smallest index at which a mismatch triggers a StartTracingException + val reportQuiet: Boolean, // do we need to trace mismatches from quiet rules? + val traceNr: Int = 0, // the zero-based index number of the RuleTrace we are currently building + var errorMismatches: Int = 0 // the number of times we have already seen a mismatch at >= minErrorIndex + ) extends ErrorAnalysisPhase { def applyOffset(offset: Int) = minErrorIndex -= offset } } diff --git a/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala b/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala index a9f9dfc..6867ddd 100644 --- a/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala +++ b/akka-parsing/src/main/scala/akka/parboiled2/support/OpTreeContext.scala @@ -133,7 +133,7 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] { opTreePF.applyOrElse(tree, (t: Tree) ⇒ c.abort(t.pos, "Invalid rule definition: " + t)) def Sequence(lhs: OpTree, rhs: OpTree): Sequence = - lhs -> rhs match { + lhs → rhs match { case (Sequence(lops), Sequence(rops)) ⇒ Sequence(lops ++ rops) case (Sequence(lops), _) ⇒ Sequence(lops :+ rhs) case (_, Sequence(ops)) ⇒ Sequence(lhs +: ops) @@ -160,7 +160,7 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] { } def FirstOf(lhs: OpTree, rhs: OpTree): FirstOf = - lhs -> rhs match { + lhs → rhs match { case (FirstOf(lops), FirstOf(rops)) ⇒ FirstOf(lops ++ rops) case (FirstOf(lops), _) ⇒ FirstOf(lops :+ rhs) case (_, FirstOf(ops)) ⇒ FirstOf(lhs +: ops) @@ -397,13 +397,13 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] { if (i <= 0) c.abort(base.pos, "`x` in `x.times` must be positive") else if (i == 1) rule else Times(rule, q"val min, max = $n", collector, separator) - case x@(Ident(_) | Select(_, _)) ⇒ Times(rule, q"val min = $n; val max = min", collector, separator) - case _ ⇒ c.abort(n.pos, "Invalid int base expression for `.times(...)`: " + n) + case x @ (Ident(_) | Select(_, _)) ⇒ Times(rule, q"val min = $n; val max = min", collector, separator) + case _ ⇒ c.abort(n.pos, "Invalid int base expression for `.times(...)`: " + n) } case q"$a.this.range2NTimes($r)" ⇒ r match { - case q"scala.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.12 + case q"scala.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.12 case q"scala.this.Predef.intWrapper($mn).to($mx)" ⇒ handleRange(mn, mx, r) // Scala 2.11 - case x@(Ident(_) | Select(_, _)) ⇒ + case x @ (Ident(_) | Select(_, _)) ⇒ Times(rule, q"val r = $r; val min = r.start; val max = r.end", collector, separator) case _ ⇒ c.abort(r.pos, "Invalid range base expression for `.times(...)`: " + 
r) } @@ -599,8 +599,8 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] { } def CharRange(lowerTree: Tree, upperTree: Tree): CharacterRange = { - val (lower, upper) = lowerTree -> upperTree match { - case (Literal(Constant(l: String)), Literal(Constant(u: String))) ⇒ l -> u + val (lower, upper) = lowerTree → upperTree match { + case (Literal(Constant(l: String)), Literal(Constant(u: String))) ⇒ l → u case _ ⇒ c.abort(lowerTree.pos, "Character ranges must be specified with string literals") } if (lower.length != 1) c.abort(lowerTree.pos, "lower bound must be a single char string") @@ -689,11 +689,11 @@ trait OpTreeContext[OpTreeCtx <: ParserMacros.ParserContext] { /////////////////////////////////// helpers //////////////////////////////////// class Collector( - val valBuilder: Tree, - val popToBuilder: Tree, + val valBuilder: Tree, + val popToBuilder: Tree, val pushBuilderResult: Tree, - val pushSomePop: Tree, - val pushNone: Tree) + val pushSomePop: Tree, + val pushNone: Tree) lazy val rule0Collector = { val unit = q"()" diff --git a/akka-parsing/src/main/scala/akka/shapeless/hlists.scala b/akka-parsing/src/main/scala/akka/shapeless/hlists.scala index e14c8be..9ec00b6 100644 --- a/akka-parsing/src/main/scala/akka/shapeless/hlists.scala +++ b/akka-parsing/src/main/scala/akka/shapeless/hlists.scala @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-13 Miles Sabin + * Copyright (c) 2011-13 Miles Sabin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ package akka.shapeless - /** * `HList` ADT base trait. * diff --git a/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala b/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala index 6f2b677..679da61 100644 --- a/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala +++ b/akka-parsing/src/main/scala/akka/shapeless/ops/hlists.scala @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-13 Miles Sabin + * Copyright (c) 2011-13 Miles Sabin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,7 +17,6 @@ package akka.shapeless package ops - object hlist { /** * Type class witnessing that this `HList` is composite and providing access to head and tail. diff --git a/akka-parsing/src/main/scala/akka/shapeless/package.scala b/akka-parsing/src/main/scala/akka/shapeless/package.scala index efb78f6..fe15b1b 100644 --- a/akka-parsing/src/main/scala/akka/shapeless/package.scala +++ b/akka-parsing/src/main/scala/akka/shapeless/package.scala @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-14 Miles Sabin + * Copyright (c) 2013-14 Miles Sabin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/akka-parsing/src/main/scala/akka/shapeless/syntax/hlists.scala b/akka-parsing/src/main/scala/akka/shapeless/syntax/hlists.scala index 2bebfa8..66cc303 100644 --- a/akka-parsing/src/main/scala/akka/shapeless/syntax/hlists.scala +++ b/akka-parsing/src/main/scala/akka/shapeless/syntax/hlists.scala @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-13 Miles Sabin + * Copyright (c) 2011-13 Miles Sabin * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
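Note on the recurring `->` to `→` rewrite throughout these hunks: on the Scala versions this commit targets (2.11/2.12), `Predef.ArrowAssoc` defines `→` as a plain alias for `->`, so the change is purely cosmetic and both spellings build the same `Tuple2`. A minimal, self-contained sketch of the equivalence (the object name is illustrative, not part of the patch):

    object ArrowAliasDemo extends App {
      // Predef.ArrowAssoc provides `->` and, on Scala 2.11/2.12, the alias `→`,
      // which simply forwards to `->`; the resulting tuples are identical.
      val ascii   = "akka.loglevel" -> "DEBUG"
      val unicode = "akka.loglevel" → "DEBUG"
      assert(ascii == unicode)
      println(ascii == unicode) // prints: true
    }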
diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala index b6b1706..67ac0c1 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala @@ -8,7 +8,7 @@ package akka.persistence.query * [[akka.persistence.query.scaladsl.EventsByTagQuery]] query, or similar queries. */ final case class EventEnvelope( - offset: Long, + offset: Long, persistenceId: String, - sequenceNr: Long, - event: Any) + sequenceNr: Long, + event: Any) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala index 63fa0f6..e521565 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala @@ -123,7 +123,7 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher( private[akka] class LiveEventsByPersistenceIdPublisher( persistenceId: String, fromSequenceNr: Long, override val toSequenceNr: Long, refreshInterval: FiniteDuration, - maxBufSize: Int, writeJournalPluginId: String) + maxBufSize: Int, writeJournalPluginId: String) extends AbstractEventsByPersistenceIdPublisher( persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) { import EventsByPersistenceIdPublisher._ diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala index 49903e5..4f2d41d 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala @@ -125,7 +125,7 @@ private[akka] abstract class AbstractEventsByTagPublisher( private[akka] class LiveEventsByTagPublisher( tag: String, fromOffset: Long, override val toOffset: Long, refreshInterval: FiniteDuration, - maxBufSize: Int, writeJournalPluginId: String) + maxBufSize: Int, writeJournalPluginId: String) extends AbstractEventsByTagPublisher( tag, fromOffset, maxBufSize, writeJournalPluginId) { import EventsByTagPublisher._ diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala index 1ac4c8e..8522275 100644 --- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala @@ -319,7 +319,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { deliverySequenceNr = snapshot.currentDeliveryId val now = System.nanoTime() unconfirmed = snapshot.unconfirmedDeliveries.map(d ⇒ - d.deliveryId -> Delivery(d.destination, d.message, now, 0))(breakOut) + d.deliveryId → Delivery(d.destination, d.message, now, 0))(breakOut) } /** diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index 2e1ed02..b75e3ab 100644 --- 
a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -115,11 +115,11 @@ trait PersistentRepr extends Message { * Creates a new copy of this [[PersistentRepr]]. */ def update( - sequenceNr: Long = sequenceNr, - persistenceId: String = persistenceId, - deleted: Boolean = deleted, - sender: ActorRef = sender, - writerUuid: String = writerUuid): PersistentRepr + sequenceNr: Long = sequenceNr, + persistenceId: String = persistenceId, + deleted: Boolean = deleted, + sender: ActorRef = sender, + writerUuid: String = writerUuid): PersistentRepr } object PersistentRepr { @@ -132,13 +132,13 @@ object PersistentRepr { * Plugin API. */ def apply( - payload: Any, - sequenceNr: Long = 0L, - persistenceId: String = PersistentRepr.Undefined, - manifest: String = PersistentRepr.Undefined, - deleted: Boolean = false, - sender: ActorRef = null, - writerUuid: String = PersistentRepr.Undefined): PersistentRepr = + payload: Any, + sequenceNr: Long = 0L, + persistenceId: String = PersistentRepr.Undefined, + manifest: String = PersistentRepr.Undefined, + deleted: Boolean = false, + sender: ActorRef = null, + writerUuid: String = PersistentRepr.Undefined): PersistentRepr = PersistentImpl(payload, sequenceNr, persistenceId, manifest, deleted, sender, writerUuid) /** @@ -157,13 +157,13 @@ object PersistentRepr { * INTERNAL API. */ private[persistence] final case class PersistentImpl( - override val payload: Any, - override val sequenceNr: Long, + override val payload: Any, + override val sequenceNr: Long, override val persistenceId: String, - override val manifest: String, - override val deleted: Boolean, - override val sender: ActorRef, - override val writerUuid: String) extends PersistentRepr with NoSerializationVerificationNeeded { + override val manifest: String, + override val deleted: Boolean, + override val sender: ActorRef, + override val writerUuid: String) extends PersistentRepr with NoSerializationVerificationNeeded { def withPayload(payload: Any): PersistentRepr = copy(payload = payload) diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala index 16ec17b..fd32fc5 100644 --- a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala +++ b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala @@ -52,8 +52,8 @@ final case class DeleteMessagesFailure(cause: Throwable, toSequenceNr: Long) @SerialVersionUID(1L) final case class Recovery( fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, - toSequenceNr: Long = Long.MaxValue, - replayMax: Long = Long.MaxValue) + toSequenceNr: Long = Long.MaxValue, + replayMax: Long = Long.MaxValue) object Recovery { diff --git a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala index 8ab09fb..8a489db 100644 --- a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala @@ -107,9 +107,9 @@ final case class SnapshotOffer(metadata: SnapshotMetadata, snapshot: Any) @SerialVersionUID(1L) final case class SnapshotSelectionCriteria( maxSequenceNr: Long = Long.MaxValue, - maxTimestamp: Long = Long.MaxValue, + maxTimestamp: Long = Long.MaxValue, minSequenceNr: Long = 0L, - minTimestamp: Long = 0L) { + minTimestamp: Long = 0L) { /** * INTERNAL API. 
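For orientation on the `Recovery` and `SnapshotSelectionCriteria` defaults realigned above: a persistent actor bounds its replay by overriding `recovery`. A hedged usage sketch (the actor class and persistenceId are illustrative, not taken from this patch):

    import akka.persistence.{ PersistentActor, Recovery, SnapshotSelectionCriteria }

    class BoundedReplayActor extends PersistentActor {
      override def persistenceId = "bounded-replay-sample"
      // Recover from a snapshot no newer than sequence number 457,
      // then stop replaying journaled events at sequence number 500.
      override def recovery = Recovery(
        fromSnapshot = SnapshotSelectionCriteria(maxSequenceNr = 457L),
        toSequenceNr = 500L)
      override def receiveRecover = { case _ ⇒ }
      override def receiveCommand = { case _ ⇒ }
    }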
diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala index d9df80b..610c48b 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSM.scala @@ -235,7 +235,7 @@ object PersistentFSM { * This extractor is just convenience for matching a (S, S) pair, including a * reminder what the new state is. */ - object -> { + object → { def unapply[S](in: (S, S)) = Some(in) } @@ -251,13 +251,13 @@ object PersistentFSM { * to be executed after FSM moves to the new state (also triggered when staying in the same state) */ final case class State[S, D, E]( - stateName: S, - stateData: D, - timeout: Option[FiniteDuration] = None, - stopReason: Option[Reason] = None, - replies: List[Any] = Nil, - domainEvents: Seq[E] = Nil, - afterTransitionDo: D ⇒ Unit = { _: D ⇒ })(private[akka] val notifies: Boolean = true) { + stateName: S, + stateData: D, + timeout: Option[FiniteDuration] = None, + stopReason: Option[Reason] = None, + replies: List[Any] = Nil, + domainEvents: Seq[E] = Nil, + afterTransitionDo: D ⇒ Unit = { _: D ⇒ })(private[akka] val notifies: Boolean = true) { /** * Copy object and update values if needed. diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala index b53cca9..9f40c80 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala @@ -280,7 +280,7 @@ private[persistence] object AsyncWriteJournal { delivered = d.snr d.target.tell(d.msg, d.sender) } else { - delayed += (d.snr -> d) + delayed += (d.snr → d) } val ro = delayed.remove(delivered + 1) if (ro.isDefined) resequence(ro.get) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala index a5e237b..0553cff 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala @@ -20,9 +20,9 @@ import scala.util.Try * `EventAdapters` serves as a per-journal collection of bound event adapters. */ class EventAdapters( - map: ConcurrentHashMap[Class[_], EventAdapter], + map: ConcurrentHashMap[Class[_], EventAdapter], bindings: immutable.Seq[(Class[_], EventAdapter)], - log: LoggingAdapter) { + log: LoggingAdapter) { /** * Finds the "most specific" matching adapter for the given class (i.e. it may return an adapter that can work on a @@ -71,8 +71,8 @@ private[akka] object EventAdapters { } private def apply( - system: ExtendedActorSystem, - adapters: Map[Name, FQN], + system: ExtendedActorSystem, + adapters: Map[Name, FQN], adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = { val adapterNames = adapters.keys.toSet @@ -84,7 +84,7 @@ private[akka] object EventAdapters { // A Map of handler from alias to implementation (i.e. 
class implementing akka.serialization.Serializer) // For example this defines a handler named 'country': `"country" -> com.example.domain.CountryTagsAdapter` - val handlers = for ((k: String, v: String) ← adapters) yield k -> instantiateAdapter(v, system).get + val handlers = for ((k: String, v: String) ← adapters) yield k → instantiateAdapter(v, system).get // bindings is a Seq of tuple representing the mapping from Class to handler. // It is primarily ordered by the most specific classes first, and secondly in the configured order. @@ -131,7 +131,7 @@ private[akka] object EventAdapters { * loading is performed by the system’s [[akka.actor.DynamicAccess]]. */ private def instantiate[T: ClassTag](fqn: FQN, system: ExtendedActorSystem): Try[T] = - system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] -> system)) recoverWith { + system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] → system)) recoverWith { case _: NoSuchMethodException ⇒ system.dynamicAccess.createInstanceFor[T](fqn, Nil) } @@ -151,7 +151,7 @@ private[akka] object EventAdapters { private final def configToMap(config: Config, path: String): Map[String, String] = { import scala.collection.JavaConverters._ if (config.hasPath(path)) { - config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ k -> v.toString } + config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ k → v.toString } } else Map.empty } @@ -159,8 +159,8 @@ import scala.collection.JavaConverters._ if (config.hasPath(path)) { config.getConfig(path).root.unwrapped.asScala.toMap map { - case (k, v: util.ArrayList[_]) if v.isInstanceOf[util.ArrayList[_]] ⇒ k -> v.asScala.map(_.toString).toList - case (k, v) ⇒ k -> List(v.toString) + case (k, v: util.ArrayList[_]) if v.isInstanceOf[util.ArrayList[_]] ⇒ k → v.asScala.map(_.toString).toList + case (k, v) ⇒ k → List(v.toString) } } else Map.empty } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala index 91add5a..5959043 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala @@ -20,10 +20,10 @@ import scala.collection.mutable.LinkedHashSet private[akka] object ReplayFilter { def props( persistentActor: ActorRef, - mode: Mode, - windowSize: Int, - maxOldWriters: Int, - debugEnabled: Boolean): Props = { + mode: Mode, + windowSize: Int, + maxOldWriters: Int, + debugEnabled: Boolean): Props = { require(windowSize > 0, "windowSize must be > 0") require(maxOldWriters > 0, "maxOldWriters must be > 0") require(mode != Disabled, "mode must not be Disabled") @@ -33,9 +33,9 @@ private[akka] object ReplayFilter { // for binary compatibility def props( persistentActor: ActorRef, - mode: Mode, - windowSize: Int, - maxOldWriters: Int): Props = props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) + mode: Mode, + windowSize: Int, + maxOldWriters: Int): Props = props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) sealed trait Mode case object Fail extends Mode diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala index 771c860..e52b902 100644 ---
a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala @@ -54,17 +54,17 @@ private[persistence] trait InmemMessages { var messages = Map.empty[String, Vector[PersistentRepr]] def add(p: PersistentRepr): Unit = messages = messages + (messages.get(p.persistenceId) match { - case Some(ms) ⇒ p.persistenceId -> (ms :+ p) - case None ⇒ p.persistenceId -> Vector(p) + case Some(ms) ⇒ p.persistenceId → (ms :+ p) + case None ⇒ p.persistenceId → Vector(p) }) def update(pid: String, snr: Long)(f: PersistentRepr ⇒ PersistentRepr): Unit = messages = messages.get(pid) match { - case Some(ms) ⇒ messages + (pid -> ms.map(sp ⇒ if (sp.sequenceNr == snr) f(sp) else sp)) + case Some(ms) ⇒ messages + (pid → ms.map(sp ⇒ if (sp.sequenceNr == snr) f(sp) else sp)) case None ⇒ messages } def delete(pid: String, snr: Long): Unit = messages = messages.get(pid) match { - case Some(ms) ⇒ messages + (pid -> ms.filterNot(_.sequenceNr == snr)) + case Some(ms) ⇒ messages + (pid → ms.filterNot(_.sequenceNr == snr)) case None ⇒ messages } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala index a0b36cf..b63ada5 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala @@ -55,13 +55,13 @@ private[persistence] trait LeveldbIdMapping extends Actor { this: LeveldbStore val nextKey = keyFromBytes(nextEntry.getKey) if (!isMappingKey(nextKey)) pathMap else { val nextVal = new String(nextEntry.getValue, UTF_8) - readIdMap(pathMap + (nextVal -> nextKey.mappingId), iter) + readIdMap(pathMap + (nextVal → nextKey.mappingId), iter) } } } private def writeIdMapping(id: String, numericId: Int): Int = { - idMap = idMap + (id -> numericId) + idMap = idMap + (id → numericId) leveldb.put(keyToBytes(mappingKey(numericId)), id.getBytes(UTF_8)) newPersistenceIdAdded(id) numericId diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala index 29cfdd4..0db740c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala @@ -12,8 +12,8 @@ import java.nio.ByteBuffer */ private[leveldb] final case class Key( persistenceId: Int, - sequenceNr: Long, - mappingId: Int) + sequenceNr: Long, + mappingId: Int) private[leveldb] object Key { def keyToBytes(key: Key): Array[Byte] = { diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala index 66ce16e..ed1f89b 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala @@ -31,19 +31,19 @@ object AtLeastOnceDeliverySpec { def senderProps(testActor: ActorRef, name: String, redeliverInterval: FiniteDuration, warnAfterNumberOfUnconfirmedAttempts: Int, redeliveryBurstLimit: Int, - destinations: Map[String, ActorPath], - async: Boolean, actorSelectionDelivery: Boolean = false): Props = + destinations: Map[String, ActorPath], + async: Boolean, 
actorSelectionDelivery: Boolean = false): Props = Props(new Sender(testActor, name, redeliverInterval, warnAfterNumberOfUnconfirmedAttempts, redeliveryBurstLimit, destinations, async, actorSelectionDelivery)) - class Sender(testActor: ActorRef, - name: String, - override val redeliverInterval: FiniteDuration, + class Sender(testActor: ActorRef, + name: String, + override val redeliverInterval: FiniteDuration, override val warnAfterNumberOfUnconfirmedAttempts: Int, - override val redeliveryBurstLimit: Int, - destinations: Map[String, ActorPath], - async: Boolean, - actorSelectionDelivery: Boolean) + override val redeliveryBurstLimit: Int, + destinations: Map[String, ActorPath], + async: Boolean, + actorSelectionDelivery: Boolean) extends PersistentActor with AtLeastOnceDelivery with ActorLogging { override def persistenceId: String = name @@ -179,7 +179,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { val probe = TestProbe() val probeA = TestProbe() - val destinations = Map("A" -> system.actorOf(destinationProps(probeA.ref)).path) + val destinations = Map("A" → system.actorOf(destinationProps(probeA.ref)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name) snd.tell(Req("a"), probe.ref) probe.expectMsg(ReqAck) @@ -191,7 +191,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) - val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path) + val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false, actorSelectionDelivery = deliverUsingActorSelection), name) snd.tell(Req("a-1"), probe.ref) probe.expectMsg(ReqAck) @@ -222,7 +222,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) - val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path) + val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name) snd.tell(Req("a-1"), probe.ref) probe.expectMsg(ReqAck) @@ -256,7 +256,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) - val destinations = Map("A" -> system.actorOf(unreliableProps(2, dst)).path) + val destinations = Map("A" → system.actorOf(unreliableProps(2, dst)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = false), name) snd.tell(Req("a-1"), probe.ref) probe.expectMsg(ReqAck) @@ -293,7 +293,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) - val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path) + val destinations = Map("A" → system.actorOf(unreliableProps(3, dst)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = 
false), name) snd.tell(Req("a-1"), probe.ref) probe.expectMsg(ReqAck) @@ -331,7 +331,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val probeB = TestProbe() - val destinations = Map("A" -> probeA.ref.path, "B" -> probeB.ref.path) + val destinations = Map("A" → probeA.ref.path, "B" → probeB.ref.path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 3, 1000, destinations, async = false), name) snd.tell(Req("a-1"), probe.ref) snd.tell(Req("b-1"), probe.ref) @@ -356,9 +356,9 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val dstB = system.actorOf(destinationProps(probeB.ref), "destination-b") val dstC = system.actorOf(destinationProps(probeC.ref), "destination-c") val destinations = Map( - "A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path, - "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path, - "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path) + "A" → system.actorOf(unreliableProps(2, dstA), "unreliable-a").path, + "B" → system.actorOf(unreliableProps(5, dstB), "unreliable-b").path, + "C" → system.actorOf(unreliableProps(3, dstC), "unreliable-c").path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = true), name) val N = 100 for (n ← 1 to N) { @@ -380,7 +380,7 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) - val destinations = Map("A" -> system.actorOf(unreliableProps(2, dst)).path) + val destinations = Map("A" → system.actorOf(unreliableProps(2, dst)).path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 2, destinations, async = true), name) diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala index 6b2f70d..0477f79 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala @@ -47,7 +47,7 @@ object SteppingInmemJournal { def getRef(instanceId: String): ActorRef = synchronized(_current(instanceId)) private def putRef(instanceId: String, instance: ActorRef): Unit = synchronized { - _current = _current + (instanceId -> instance) + _current = _current + (instanceId → instance) } private def remove(instanceId: String): Unit = synchronized( _current -= instanceId) diff --git a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala index 7559581..e845a13 100644 --- a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala @@ -90,8 +90,8 @@ class ResendUnfulfillableException final case class AckedSendBuffer[T <: HasSequenceNumber]( capacity: Int, nonAcked: IndexedSeq[T] = Vector.empty[T], - nacked: IndexedSeq[T] = Vector.empty[T], - maxSeq: SeqNo = SeqNo(-1)) { + nacked: IndexedSeq[T] = Vector.empty[T], + maxSeq: SeqNo = SeqNo(-1)) { /** * Processes an incoming acknowledgement and returns a new buffer with only unacknowledged elements remaining. 
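The `AckedSendBuffer` whose parameters are being realigned here implements cumulative-acknowledgement bookkeeping for resends. A simplified model of the send-side idea (a sketch only, not Akka's actual implementation; `SimpleSendBuffer` and its members are hypothetical names):

    // Simplified model: keep sent messages until a cumulative ack covers
    // their sequence number; whatever remains is eligible for resend.
    final case class SimpleSendBuffer[T](pending: Vector[(Long, T)] = Vector.empty) {
      def buffer(seqNo: Long, msg: T): SimpleSendBuffer[T] =
        copy(pending = pending :+ (seqNo -> msg))
      def ack(cumulativeAck: Long): SimpleSendBuffer[T] =
        copy(pending = pending.filter { case (seqNo, _) ⇒ seqNo > cumulativeAck })
    }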
@@ -137,9 +137,9 @@ final case class AckedSendBuffer[T <: HasSequenceNumber]( * @param buf Buffer of messages that are waiting for delivery */ final case class AckedReceiveBuffer[T <: HasSequenceNumber]( - lastDelivered: SeqNo = SeqNo(-1), - cumulativeAck: SeqNo = SeqNo(-1), - buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) { + lastDelivered: SeqNo = SeqNo(-1), + cumulativeAck: SeqNo = SeqNo(-1), + buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) { import SeqNo.ord.max diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala index a042834..560533a 100644 --- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala @@ -28,8 +28,9 @@ import akka.util.Helpers.ConfigOps */ class DeadlineFailureDetector( val acceptableHeartbeatPause: FiniteDuration, - val heartbeatInterval: FiniteDuration)( - implicit clock: Clock) extends FailureDetector { + val heartbeatInterval: FiniteDuration)( + implicit + clock: Clock) extends FailureDetector { /** * Constructor that reads parameters from config. diff --git a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala index 5d20087..20462a5 100644 --- a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala @@ -48,7 +48,7 @@ class DefaultFailureDetectorRegistry[A](detectorFactory: () ⇒ FailureDetector) case None ⇒ val newDetector: FailureDetector = detectorFactory() newDetector.heartbeat() - resourceToFailureDetector.set(oldTable + (resource -> newDetector)) + resourceToFailureDetector.set(oldTable + (resource → newDetector)) } } finally failureDetectorCreationLock.unlock() } diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala index 58510e1..ff417b7 100644 --- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala +++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala @@ -32,25 +32,25 @@ import scala.concurrent.Future * INTERNAL API */ private[remote] trait InboundMessageDispatcher { - def dispatch(recipient: InternalActorRef, - recipientAddress: Address, + def dispatch(recipient: InternalActorRef, + recipientAddress: Address, serializedMessage: SerializedMessage, - senderOption: Option[ActorRef]): Unit + senderOption: Option[ActorRef]): Unit } /** * INTERNAL API */ -private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem, +private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem, private val provider: RemoteActorRefProvider, - private val log: LoggingAdapter) extends InboundMessageDispatcher { + private val log: LoggingAdapter) extends InboundMessageDispatcher { private val remoteDaemon = provider.remoteDaemon - override def dispatch(recipient: InternalActorRef, - recipientAddress: Address, + override def dispatch(recipient: InternalActorRef, + recipientAddress: Address, serializedMessage: SerializedMessage, - senderOption: Option[ActorRef]): Unit = { + senderOption: Option[ActorRef]): Unit = { import provider.remoteSettings._ @@ -129,9 +129,9 @@ private[remote] final case class ShutDownAssociation(localAddress: Address, remo * INTERNAL API */ @SerialVersionUID(2L) -private[remote] final 
diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
index 58510e1..ff417b7 100644
--- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala
+++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala
@@ -32,25 +32,25 @@ import scala.concurrent.Future
  * INTERNAL API
  */
 private[remote] trait InboundMessageDispatcher {
-  def dispatch(recipient: InternalActorRef,
-               recipientAddress: Address,
+  def dispatch(recipient: InternalActorRef,
+               recipientAddress: Address,
                serializedMessage: SerializedMessage,
-               senderOption: Option[ActorRef]): Unit
+               senderOption: Option[ActorRef]): Unit
 }

 /**
  * INTERNAL API
  */
-private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem,
+private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem,
                                                private val provider: RemoteActorRefProvider,
-                                               private val log: LoggingAdapter) extends InboundMessageDispatcher {
+                                               private val log: LoggingAdapter) extends InboundMessageDispatcher {

   private val remoteDaemon = provider.remoteDaemon

-  override def dispatch(recipient: InternalActorRef,
-                        recipientAddress: Address,
+  override def dispatch(recipient: InternalActorRef,
+                        recipientAddress: Address,
                         serializedMessage: SerializedMessage,
-                        senderOption: Option[ActorRef]): Unit = {
+                        senderOption: Option[ActorRef]): Unit = {

     import provider.remoteSettings._

@@ -129,9 +129,9 @@ private[remote] final case class ShutDownAssociation(localAddress: Address, remo
  * INTERNAL API
  */
 @SerialVersionUID(2L)
-private[remote] final case class InvalidAssociation(localAddress: Address,
-                                                    remoteAddress: Address,
-                                                    cause: Throwable,
+private[remote] final case class InvalidAssociation(localAddress: Address,
+                                                    remoteAddress: Address,
+                                                    cause: Throwable,
                                                     disassociationInfo: Option[DisassociateInfo] = None)
   extends EndpointException("Invalid address: " + remoteAddress, cause) with AssociationProblem
@@ -174,12 +174,12 @@ private[remote] object ReliableDeliverySupervisor {

   def props(
     handleOrActive: Option[AkkaProtocolHandle],
-    localAddress: Address,
-    remoteAddress: Address,
-    refuseUid: Option[Int],
-    transport: AkkaProtocolTransport,
-    settings: RemoteSettings,
-    codec: AkkaPduCodec,
+    localAddress: Address,
+    remoteAddress: Address,
+    refuseUid: Option[Int],
+    transport: AkkaProtocolTransport,
+    settings: RemoteSettings,
+    codec: AkkaPduCodec,
     receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
     Props(classOf[ReliableDeliverySupervisor], handleOrActive, localAddress, remoteAddress, refuseUid, transport, settings, codec, receiveBuffers)
@@ -189,13 +189,13 @@ private[remote] object ReliableDeliverySupervisor {
  * INTERNAL API
  */
 private[remote] class ReliableDeliverySupervisor(
-  handleOrActive: Option[AkkaProtocolHandle],
-  val localAddress: Address,
-  val remoteAddress: Address,
-  val refuseUid: Option[Int],
-  val transport: AkkaProtocolTransport,
-  val settings: RemoteSettings,
-  val codec: AkkaPduCodec,
+  handleOrActive: Option[AkkaProtocolHandle],
+  val localAddress: Address,
+  val remoteAddress: Address,
+  val refuseUid: Option[Int],
+  val transport: AkkaProtocolTransport,
+  val settings: RemoteSettings,
+  val codec: AkkaPduCodec,
   val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends Actor with ActorLogging {

   import ReliableDeliverySupervisor._
   import context.dispatcher
@@ -436,11 +436,11 @@ private[remote] class ReliableDeliverySupervisor(
  * INTERNAL API
  */
 private[remote] abstract class EndpointActor(
-  val localAddress: Address,
+  val localAddress: Address,
   val remoteAddress: Address,
-  val transport: Transport,
-  val settings: RemoteSettings,
-  val codec: AkkaPduCodec) extends Actor with ActorLogging {
+  val transport: Transport,
+  val settings: RemoteSettings,
+  val codec: AkkaPduCodec) extends Actor with ActorLogging {

   def inbound: Boolean

@@ -462,14 +462,14 @@ private[remote] abstract class EndpointActor(
 private[remote] object EndpointWriter {

   def props(
-    handleOrActive: Option[AkkaProtocolHandle],
-    localAddress: Address,
-    remoteAddress: Address,
-    refuseUid: Option[Int],
-    transport: AkkaProtocolTransport,
-    settings: RemoteSettings,
-    codec: AkkaPduCodec,
-    receiveBuffers: ConcurrentHashMap[Link, ResendState],
+    handleOrActive: Option[AkkaProtocolHandle],
+    localAddress: Address,
+    remoteAddress: Address,
+    refuseUid: Option[Int],
+    transport: AkkaProtocolTransport,
+    settings: RemoteSettings,
+    codec: AkkaPduCodec,
+    receiveBuffers: ConcurrentHashMap[Link, ResendState],
     reliableDeliverySupervisor: Option[ActorRef]): Props =
     Props(classOf[EndpointWriter], handleOrActive, localAddress, remoteAddress, refuseUid, transport, settings, codec, receiveBuffers, reliableDeliverySupervisor)
@@ -507,14 +507,14 @@ private[remote] object EndpointWriter {
  * INTERNAL API
  */
 private[remote] class EndpointWriter(
-  handleOrActive: Option[AkkaProtocolHandle],
-  localAddress: Address,
-  remoteAddress: Address,
-  refuseUid: Option[Int],
-  transport: AkkaProtocolTransport,
-  settings: RemoteSettings,
-  codec: AkkaPduCodec,
-  val receiveBuffers: ConcurrentHashMap[Link, ResendState],
+  handleOrActive: Option[AkkaProtocolHandle],
+  localAddress: Address,
+  remoteAddress: Address,
+  refuseUid: Option[Int],
+  transport: AkkaProtocolTransport,
+  settings: RemoteSettings,
+  codec: AkkaPduCodec,
+  val receiveBuffers: ConcurrentHashMap[Link, ResendState],
   val reliableDeliverySupervisor: Option[ActorRef]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
@@ -881,16 +881,16 @@ private[remote] class EndpointWriter(
 private[remote] object EndpointReader {

   def props(
-    localAddress: Address,
-    remoteAddress: Address,
-    transport: Transport,
-    settings: RemoteSettings,
-    codec: AkkaPduCodec,
-    msgDispatch: InboundMessageDispatcher,
-    inbound: Boolean,
-    uid: Int,
+    localAddress: Address,
+    remoteAddress: Address,
+    transport: Transport,
+    settings: RemoteSettings,
+    codec: AkkaPduCodec,
+    msgDispatch: InboundMessageDispatcher,
+    inbound: Boolean,
+    uid: Int,
     reliableDeliverySupervisor: Option[ActorRef],
-    receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
+    receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props =
     Props(classOf[EndpointReader], localAddress, remoteAddress, transport, settings, codec, msgDispatch, inbound, uid, reliableDeliverySupervisor, receiveBuffers)
@@ -900,16 +900,16 @@ private[remote] object EndpointReader {
  * INTERNAL API
  */
 private[remote] class EndpointReader(
-  localAddress: Address,
-  remoteAddress: Address,
-  transport: Transport,
-  settings: RemoteSettings,
-  codec: AkkaPduCodec,
-  msgDispatch: InboundMessageDispatcher,
-  val inbound: Boolean,
-  val uid: Int,
+  localAddress: Address,
+  remoteAddress: Address,
+  transport: Transport,
+  settings: RemoteSettings,
+  codec: AkkaPduCodec,
+  msgDispatch: InboundMessageDispatcher,
+  val inbound: Boolean,
+  val uid: Int,
   val reliableDeliverySupervisor: Option[ActorRef],
-  val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {
+  val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) {

   import EndpointWriter.{ OutboundAck, StopReading, StoppedReading }

diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
index edc7cdf..49fd506 100644
--- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
+++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala
@@ -67,11 +67,11 @@ private[akka] object FailureDetectorLoader {
   def load(fqcn: String, config: Config, system: ActorSystem): FailureDetector = {
     system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[FailureDetector](
       fqcn, List(
-        classOf[Config] -> config,
-        classOf[EventStream] -> system.eventStream)).recover({
-          case e ⇒ throw new ConfigurationException(
-            s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e)
-        }).get
+        classOf[Config] → config,
+        classOf[EventStream] → system.eventStream)).recover({
+          case e ⇒ throw new ConfigurationException(
+            s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e)
+        }).get
   }

   /**
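FailureDetectorLoader.load above instantiates the configured detector class reflectively, passing the detector's Config section and the system's EventStream as constructor arguments, so any pluggable detector needs exactly that two-argument constructor. A minimal sketch; the class name and config path are illustrative only:

    import akka.event.EventStream
    import akka.remote.FailureDetector
    import com.typesafe.config.Config

    // Hypothetical stub detector: reports every resource as available.
    // Only the (Config, EventStream) constructor shape matters here.
    class AlwaysAvailableDetector(config: Config, ev: EventStream) extends FailureDetector {
      override def isAvailable: Boolean = true
      override def isMonitoring: Boolean = false
      override def heartbeat(): Unit = ()
    }

It would then be wired in through configuration, for example (path shown for illustration): akka.remote.watch-failure-detector.implementation-class = "com.example.AlwaysAvailableDetector"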
diff --git a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
index 2bff7f1..8f208d3 100644
--- a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
+++ b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala
@@ -54,12 +54,13 @@ import akka.util.Helpers.ConfigOps
  * purposes. It is only used for measuring intervals (duration).
  */
 class PhiAccrualFailureDetector(
-  val threshold: Double,
-  val maxSampleSize: Int,
-  val minStdDeviation: FiniteDuration,
+  val threshold: Double,
+  val maxSampleSize: Int,
+  val minStdDeviation: FiniteDuration,
   val acceptableHeartbeatPause: FiniteDuration,
-  val firstHeartbeatEstimate: FiniteDuration)(
-  implicit clock: Clock) extends FailureDetector {
+  val firstHeartbeatEstimate: FiniteDuration)(
+  implicit
+  clock: Clock) extends FailureDetector {

   /**
    * Constructor that reads parameters from config.
@@ -203,9 +204,9 @@ private[akka] object HeartbeatHistory {
  * for empty HeartbeatHistory, i.e. throws ArithmeticException.
  */
 private[akka] final case class HeartbeatHistory private (
-  maxSampleSize: Int,
-  intervals: immutable.IndexedSeq[Long],
-  intervalSum: Long,
+  maxSampleSize: Int,
+  intervals: immutable.IndexedSeq[Long],
+  intervalSum: Long,
   squaredIntervalSum: Long) {

   // Heartbeat histories are created trough the firstHeartbeat variable of the PhiAccrualFailureDetector
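For context on the class being reformatted: the phi accrual detector (after Hayashibara et al.) turns heartbeat inter-arrival statistics into a suspicion level phi(t) = -log10(1 - F(t)), where F is the cumulative distribution fitted to the recent inter-arrival samples, and isAvailable simply checks phi < threshold. A construction mirroring the defaults used by AccrualFailureDetectorSpec further down, with parameter names taken straight from the constructor in this hunk:

    import scala.concurrent.duration._
    import akka.remote.{ FailureDetector, PhiAccrualFailureDetector }

    val fd = new PhiAccrualFailureDetector(
      threshold = 8.0,
      maxSampleSize = 1000,
      minStdDeviation = 100.millis,
      acceptableHeartbeatPause = Duration.Zero,
      firstHeartbeatEstimate = 1.second)(clock = FailureDetector.defaultClock)

    fd.heartbeat()          // record an arrival
    val up = fd.isAvailable // true while phi stays below the threshold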
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
index 6e52871..2dd6a05 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
@@ -73,8 +73,8 @@ private[akka] object RemoteActorRefProvider {
  * and handled as dead letters to the original (remote) destination. Without this special case, DeathWatch related
  * functionality breaks, like the special handling of Watch messages arriving to dead letters.
  */
-  private class RemoteDeadLetterActorRef(_provider: ActorRefProvider,
-                                         _path: ActorPath,
+  private class RemoteDeadLetterActorRef(_provider: ActorRefProvider,
+                                         _path: ActorPath,
                                          _eventStream: EventStream) extends DeadLetterActorRef(_provider, _path, _eventStream) {

     import EndpointManager.Send
@@ -103,9 +103,9 @@ private[akka] object RemoteActorRefProvider {
  *
  */
 private[akka] class RemoteActorRefProvider(
-  val systemName: String,
-  val settings: ActorSystem.Settings,
-  val eventStream: EventStream,
+  val systemName: String,
+  val settings: ActorSystem.Settings,
+  val eventStream: EventStream,
   val dynamicAccess: DynamicAccess) extends ActorRefProvider {

   import RemoteActorRefProvider._
@@ -434,12 +434,12 @@ private[akka] trait RemoteRef extends ActorRefScope {
  * This reference is network-aware (remembers its origin) and immutable.
  */
 private[akka] class RemoteActorRef private[akka] (
-  remote: RemoteTransport,
+  remote: RemoteTransport,
   val localAddressToUse: Address,
-  val path: ActorPath,
-  val getParent: InternalActorRef,
-  props: Option[Props],
-  deploy: Option[Deploy])
+  val path: ActorPath,
+  val getParent: InternalActorRef,
+  props: Option[Props],
+  deploy: Option[Deploy])
   extends InternalActorRef with RemoteRef {

   def getChild(name: Iterator[String]): InternalActorRef = {
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
index b1b744f..618b71a 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala
@@ -42,11 +42,11 @@ private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, pat
  * It acts as the brain of the remote that responds to system remote events (messages) and undertakes action.
  */
 private[akka] class RemoteSystemDaemon(
-  system: ActorSystemImpl,
-  _path: ActorPath,
-  _parent: InternalActorRef,
-  terminator: ActorRef,
-  _log: LoggingAdapter,
+  system: ActorSystemImpl,
+  _path: ActorPath,
+  _parent: InternalActorRef,
+  terminator: ActorRef,
+  _log: LoggingAdapter,
   val untrustedMode: Boolean)
   extends VirtualPathContainer(system.provider, _path, _parent, _log) {

diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
index 2c8d248..cbcc36e 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala
@@ -30,7 +30,7 @@ private[akka] class RemoteDeploymentWatcher extends Actor with RequiresMessageQu
   def receive = {
     case WatchRemote(a, supervisor: InternalActorRef) ⇒
-      supervisors += (a -> supervisor)
+      supervisors += (a → supervisor)
       context.watch(a)

     case t @ Terminated(a) if supervisors isDefinedAt a ⇒
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
index 4fc7e0c..3c372c0 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala
@@ -21,9 +21,9 @@ private[akka] object RemoteWatcher {
   * Factory method for `RemoteWatcher` [[akka.actor.Props]].
   */
  def props(
-   failureDetector: FailureDetectorRegistry[Address],
-   heartbeatInterval: FiniteDuration,
-   unreachableReaperInterval: FiniteDuration,
+   failureDetector: FailureDetectorRegistry[Address],
+   heartbeatInterval: FiniteDuration,
+   unreachableReaperInterval: FiniteDuration,
    heartbeatExpectedResponseAfter: FiniteDuration): Props =
    Props(classOf[RemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval, heartbeatExpectedResponseAfter).withDeploy(Deploy.local)
@@ -44,7 +44,7 @@ private[akka] object RemoteWatcher {
     lazy val empty: Stats = counts(0, 0)
     def counts(watching: Int, watchingNodes: Int): Stats = Stats(watching, watchingNodes)(Set.empty, Set.empty)
   }
-  final case class Stats(watching: Int, watchingNodes: Int)(val watchingRefs: Set[(ActorRef, ActorRef)],
+  final case class Stats(watching: Int, watchingNodes: Int)(val watchingRefs: Set[(ActorRef, ActorRef)],
                                                             val watchingAddresses: Set[Address]) {
     override def toString: String = {
       def formatWatchingRefs: String =
@@ -78,9 +78,9 @@ private[akka] object RemoteWatcher {
  *
  */
 private[akka] class RemoteWatcher(
-  failureDetector: FailureDetectorRegistry[Address],
-  heartbeatInterval: FiniteDuration,
-  unreachableReaperInterval: FiniteDuration,
+  failureDetector: FailureDetectorRegistry[Address],
+  heartbeatInterval: FiniteDuration,
+  unreachableReaperInterval: FiniteDuration,
   heartbeatExpectedResponseAfter: FiniteDuration)
   extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
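RemoteWatcher is an internal actor; user code engages it purely through death watch. Watching any actor that lives on another node causes RemoteWatcher to heartbeat that node with the failure detector registry it was constructed with above. A minimal sketch, where subject is assumed to be a reference resolved from another actor system:

    import akka.actor.{ Actor, ActorRef, Terminated }

    class Monitor(subject: ActorRef) extends Actor {
      context.watch(subject) // enough to start remote heartbeating
      def receive = {
        case Terminated(ref) ⇒
          // fires when the actor stops, or when its node is deemed unreachable
          context.stop(self)
      }
    }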
diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala
index ad45c4a..d45fd2a 100644
--- a/akka-remote/src/main/scala/akka/remote/Remoting.scala
+++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala
@@ -183,7 +183,7 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc
         transportMapping = transports.groupBy {
           case (transport, _) ⇒ transport.schemeIdentifier
-        } map { case (k, v) ⇒ k -> v.toSet }
+        } map { case (k, v) ⇒ k → v.toSet }

         defaultAddress = transports.head._2
         addresses = transports.map { _._2 }.toSet
@@ -232,7 +232,7 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc
   private[akka] def boundAddresses: Map[String, Set[Address]] = {
     transportMapping.map {
       case (scheme, transports) ⇒
-        scheme -> transports.flatMap {
+        scheme → transports.flatMap {
           // Need to do like this for binary compatibility reasons
           case (t, _) ⇒ Option(t.boundAddress)
         }
@@ -265,7 +265,7 @@ private[remote] object EndpointManager {
   // Messages internal to EndpointManager
   case object Prune extends NoSerializationVerificationNeeded
   final case class ListensResult(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]],
-                                 results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
+                                 results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])])
     extends NoSerializationVerificationNeeded
   final case class ListensFailure(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], cause: Throwable)
     extends NoSerializationVerificationNeeded
@@ -304,21 +304,21 @@ private[remote] object EndpointManager {
       case Some(Pass(e, _, _)) ⇒
         throw new IllegalArgumentException(s"Attempting to overwrite existing endpoint [$e] with [$endpoint]")
       case _ ⇒
-        addressToWritable += address -> Pass(endpoint, uid, refuseUid)
-        writableToAddress += endpoint -> address
+        addressToWritable += address → Pass(endpoint, uid, refuseUid)
+        writableToAddress += endpoint → address
         endpoint
     }

     def registerWritableEndpointUid(remoteAddress: Address, uid: Int): Unit = {
       addressToWritable.get(remoteAddress) match {
-        case Some(Pass(ep, _, refuseUid)) ⇒ addressToWritable += remoteAddress -> Pass(ep, Some(uid), refuseUid)
+        case Some(Pass(ep, _, refuseUid)) ⇒ addressToWritable += remoteAddress → Pass(ep, Some(uid), refuseUid)
         case other ⇒ // the GotUid might have lost the race with some failure
       }
     }

     def registerReadOnlyEndpoint(address: Address, endpoint: ActorRef, uid: Int): ActorRef = {
-      addressToReadonly += address -> ((endpoint, uid))
-      readonlyToAddress += endpoint -> address
+      addressToReadonly += address → ((endpoint, uid))
+      readonlyToAddress += endpoint → address
       endpoint
     }
@@ -371,7 +371,7 @@ private[remote] object EndpointManager {
      */
     def markAsFailed(endpoint: ActorRef, timeOfRelease: Deadline): Unit =
       if (isWritable(endpoint)) {
-        addressToWritable += writableToAddress(endpoint) -> Gated(timeOfRelease)
+        addressToWritable += writableToAddress(endpoint) → Gated(timeOfRelease)
         writableToAddress -= endpoint
       } else if (isReadOnly(endpoint)) {
         addressToReadonly -= readonlyToAddress(endpoint)
@@ -379,7 +379,7 @@ private[remote] object EndpointManager {
     }

     def markAsQuarantined(address: Address, uid: Int, timeOfRelease: Deadline): Unit =
-      addressToWritable += address -> Quarantined(uid, timeOfRelease)
+      addressToWritable += address → Quarantined(uid, timeOfRelease)

     def removePolicy(address: Address): Unit =
       addressToWritable -= address
@@ -509,13 +509,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
       } map {
         case (a, t) if t.size > 1 ⇒
           throw new RemoteTransportException(s"There are more than one transports listening on local address [$a]", null)
-        case (a, t) ⇒ a -> t.head._1
+        case (a, t) ⇒ a → t.head._1
       }

       // Register to each transport as listener and collect mapping to addresses
       val transportsAndAddresses = results map {
         case (transport, address, promise) ⇒
           promise.success(ActorAssociationEventListener(self))
-          transport -> address
+          transport → address
       }
       addressesPromise.success(transportsAndAddresses)
     case ListensFailure(addressesPromise, cause) ⇒
@@ -582,7 +582,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
       // Stop all matching stashed connections
       stashedInbound = stashedInbound.map {
         case (writer, associations) ⇒
-          writer -> associations.filter { assoc ⇒
+          writer → associations.filter { assoc ⇒
             val handle = assoc.association.asInstanceOf[AkkaProtocolHandle]
             val drop = matchesQuarantine(handle)
             if (drop) handle.disassociate()
@@ -677,7 +677,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
     case ia @ InboundAssociation(handle: AkkaProtocolHandle) ⇒
       endpoints.readOnlyEndpointFor(handle.remoteAddress) match {
         case Some((endpoint, _)) ⇒
           pendingReadHandoffs.get(endpoint) foreach (_.disassociate())
-          pendingReadHandoffs += endpoint -> handle
+          pendingReadHandoffs += endpoint → handle
           endpoint ! EndpointWriter.TakeOver(handle, self)
           endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match {
             case Some(Pass(ep, _, _)) ⇒ ep ! ReliableDeliverySupervisor.Ungate
@@ -692,13 +692,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
               // to get an unstash event
               if (!writerIsIdle) {
                 ep ! ReliableDeliverySupervisor.IsIdle
-                stashedInbound += ep -> (stashedInbound.getOrElse(ep, Vector.empty) :+ ia)
+                stashedInbound += ep → (stashedInbound.getOrElse(ep, Vector.empty) :+ ia)
               } else
                 createAndRegisterEndpoint(handle, refuseUid = endpoints.refuseUid(handle.remoteAddress))
             case Some(Pass(ep, Some(uid), _)) ⇒
               if (handle.handshakeInfo.uid == uid) {
                 pendingReadHandoffs.get(ep) foreach (_.disassociate())
-                pendingReadHandoffs += ep -> handle
+                pendingReadHandoffs += ep → handle
                 ep ! EndpointWriter.StopReading(ep, self)
                 ep ! ReliableDeliverySupervisor.Ungate
               } else {
@@ -743,7 +743,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
      */
     val transports: Seq[AkkaProtocolTransport] = for ((fqn, adapters, config) ← settings.Transports) yield {

-      val args = Seq(classOf[ExtendedActorSystem] -> context.system, classOf[Config] -> config)
+      val args = Seq(classOf[ExtendedActorSystem] → context.system, classOf[Config] → config)

       // Loads the driver -- the bottom element of the chain.
       // The chain at this point:
@@ -802,13 +802,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends
     pendingReadHandoffs -= takingOverFrom
   }

-  private def createEndpoint(remoteAddress: Address,
-                             localAddress: Address,
-                             transport: AkkaProtocolTransport,
+  private def createEndpoint(remoteAddress: Address,
+                             localAddress: Address,
+                             transport: AkkaProtocolTransport,
                              endpointSettings: RemoteSettings,
-                             handleOption: Option[AkkaProtocolHandle],
-                             writing: Boolean,
-                             refuseUid: Option[Int]): ActorRef = {
+                             handleOption: Option[AkkaProtocolHandle],
+                             writing: Boolean,
+                             refuseUid: Option[Int]): ActorRef = {
     require(transportMapping contains localAddress, "Transport mapping is not defined for the address")
     // refuseUid is ignored for read-only endpoints since the UID of the remote system is already known and has passed
     // quarantine checks
diff --git a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
index f988447..1f83506 100644
--- a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala
@@ -26,9 +26,9 @@ sealed trait AssociationEvent extends RemotingLifecycleEvent {

 @SerialVersionUID(1L)
 final case class AssociatedEvent(
-  localAddress: Address,
+  localAddress: Address,
   remoteAddress: Address,
-  inbound: Boolean)
+  inbound: Boolean)
   extends AssociationEvent {

   protected override def eventName: String = "Associated"
@@ -38,9 +38,9 @@ final case class AssociatedEvent(

 @SerialVersionUID(1L)
 final case class DisassociatedEvent(
-  localAddress: Address,
+  localAddress: Address,
   remoteAddress: Address,
-  inbound: Boolean)
+  inbound: Boolean)
   extends AssociationEvent {
   protected override def eventName: String = "Disassociated"
   override def logLevel: Logging.LogLevel = Logging.DebugLevel
@@ -48,11 +48,11 @@ final case class DisassociatedEvent(

 @SerialVersionUID(1L)
 final case class AssociationErrorEvent(
-  cause: Throwable,
-  localAddress: Address,
+  cause: Throwable,
+  localAddress: Address,
   remoteAddress: Address,
-  inbound: Boolean,
-  logLevel: Logging.LogLevel) extends AssociationEvent {
+  inbound: Boolean,
+  logLevel: Logging.LogLevel) extends AssociationEvent {
   protected override def eventName: String = "AssociationError"
   override def toString: String = s"${super.toString}: Error [${cause.getMessage}] [${Logging.stackTraceFor(cause)}]"
   def getCause: Throwable = cause
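These lifecycle events are published to the system event stream, so the classes touched in this file can be observed without any internal API. A small listener, assuming only the public event types shown above:

    import akka.actor.{ Actor, ActorLogging }
    import akka.remote.{ AssociatedEvent, DisassociatedEvent, RemotingLifecycleEvent }

    class RemotingListener extends Actor with ActorLogging {
      override def preStart(): Unit =
        context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])
      def receive = {
        case AssociatedEvent(local, remote, inbound) ⇒
          log.info("associated: {} with {} (inbound={})", local, remote, inbound)
        case DisassociatedEvent(_, remote, _) ⇒
          log.info("disassociated from {}", remote)
        case _ ⇒ // other RemotingLifecycleEvent subtypes
      }
    }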
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
index 548b5ae..11fc0c8 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala
@@ -27,7 +27,7 @@ class TransportAdapters(system: ExtendedActorSystem) extends Extension {
   val settings = RARP(system).provider.remoteSettings

   private val adaptersTable: Map[String, TransportAdapterProvider] = for ((name, fqn) ← settings.Adapters) yield {
-    name -> system.dynamicAccess.createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty).recover({
+    name → system.dynamicAccess.createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty).recover({
       case e ⇒ throw new IllegalArgumentException(s"Cannot instantiate transport adapter [${fqn}]", e)
     }).get
   }
@@ -68,7 +68,7 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor

   protected def maximumOverhead: Int

-  protected def interceptListen(listenAddress: Address,
+  protected def interceptListen(listenAddress: Address,
                                 listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener]

   protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit
@@ -116,9 +116,9 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor

 }

-abstract class AbstractTransportAdapterHandle(val originalLocalAddress: Address,
+abstract class AbstractTransportAdapterHandle(val originalLocalAddress: Address,
                                               val originalRemoteAddress: Address,
-                                              val wrappedHandle: AssociationHandle,
+                                              val wrappedHandle: AssociationHandle,
                                               val addedSchemeIdentifier: String) extends AssociationHandle
   with SchemeAugmenter {
@@ -138,7 +138,7 @@ object ActorTransportAdapter {
   final case class ListenerRegistered(listener: AssociationEventListener) extends TransportOperation
   final case class AssociateUnderlying(remoteAddress: Address, statusPromise: Promise[AssociationHandle]) extends TransportOperation
-  final case class ListenUnderlying(listenAddress: Address,
+  final case class ListenUnderlying(listenAddress: Address,
                                     upstreamListener: Future[AssociationEventListener]) extends TransportOperation
   final case class DisassociateUnderlying(info: DisassociateInfo = AssociationHandle.Unknown) extends TransportOperation
     with DeadLetterSuppression
@@ -159,7 +159,7 @@ abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorS
   private def registerManager(): Future[ActorRef] =
     (system.actorSelection("/system/transports") ? RegisterTransportActor(managerProps, managerName)).mapTo[ActorRef]

-  override def interceptListen(listenAddress: Address,
+  override def interceptListen(listenAddress: Address,
                                listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = {
     registerManager().map { mgr ⇒
       // Side effecting: storing the manager instance in volatile var
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
index 8415e2e..3bc10b9 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala
@@ -34,11 +34,11 @@ private[remote] object AkkaPduCodec {
   case object Heartbeat extends AkkaPdu
   final case class Payload(bytes: ByteString) extends AkkaPdu

-  final case class Message(recipient: InternalActorRef,
-                           recipientAddress: Address,
+  final case class Message(recipient: InternalActorRef,
+                           recipientAddress: Address,
                            serializedMessage: SerializedMessage,
-                           senderOption: Option[ActorRef],
-                           seqOption: Option[SeqNo]) extends HasSequenceNumber {
+                           senderOption: Option[ActorRef],
+                           seqOption: Option[SeqNo]) extends HasSequenceNumber {

     def reliableDeliveryEnabled = seqOption.isDefined
@@ -93,12 +93,12 @@ private[remote] trait AkkaPduCodec {
   def decodeMessage(raw: ByteString, provider: RemoteActorRefProvider, localAddress: Address): (Option[Ack], Option[Message])

   def constructMessage(
-    localAddress: Address,
-    recipient: ActorRef,
+    localAddress: Address,
+    recipient: ActorRef,
     serializedMessage: SerializedMessage,
-    senderOption: Option[ActorRef],
-    seqOption: Option[SeqNo] = None,
-    ackOption: Option[Ack] = None): ByteString
+    senderOption: Option[ActorRef],
+    seqOption: Option[SeqNo] = None,
+    ackOption: Option[Ack] = None): ByteString

   def constructPureAck(ack: Ack): ByteString
 }
@@ -117,12 +117,12 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
   }

   override def constructMessage(
-    localAddress: Address,
-    recipient: ActorRef,
+    localAddress: Address,
+    recipient: ActorRef,
     serializedMessage: SerializedMessage,
-    senderOption: Option[ActorRef],
-    seqOption: Option[SeqNo] = None,
-    ackOption: Option[Ack] = None): ByteString = {
+    senderOption: Option[ActorRef],
+    seqOption: Option[SeqNo] = None,
+    ackOption: Option[Ack] = None): ByteString = {

     val ackAndEnvelopeBuilder = AckAndEnvelopeContainer.newBuilder
@@ -175,8 +175,8 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
   }

   override def decodeMessage(
-    raw: ByteString,
-    provider: RemoteActorRefProvider,
+    raw: ByteString,
+    provider: RemoteActorRefProvider,
     localAddress: Address): (Option[Ack], Option[Message]) = {

     val ackAndEnvelope = AckAndEnvelopeContainer.parseFrom(raw.toArray)
@@ -225,7 +225,7 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec {
     Address(encodedAddress.getProtocol, encodedAddress.getSystem, encodedAddress.getHostname, encodedAddress.getPort)

   private def constructControlMessagePdu(
-    code: WireFormats.CommandType,
+    code: WireFormats.CommandType,
     handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = {

     val controlMessageBuilder = AkkaControlMessage.newBuilder()
diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
index 187237e..00a3811 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala
@@ -64,7 +64,7 @@ private[remote] object AkkaProtocolTransport { //Couldn't these go into the Remo
   final case class AssociateUnderlyingRefuseUid(
     remoteAddress: Address,
     statusPromise: Promise[AssociationHandle],
-    refuseUid: Option[Int]) extends NoSerializationVerificationNeeded
+    refuseUid: Option[Int]) extends NoSerializationVerificationNeeded
 }

 final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String])
@@ -93,10 +93,10 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]
  * the codec that will be used to encode/decode Akka PDUs
  */
 private[remote] class AkkaProtocolTransport(
-  wrappedTransport: Transport,
-  private val system: ActorSystem,
+  wrappedTransport: Transport,
+  private val system: ActorSystem,
   private val settings: AkkaProtocolSettings,
-  private val codec: AkkaPduCodec) extends ActorTransportAdapter(wrappedTransport, system) {
+  private val codec: AkkaPduCodec) extends ActorTransportAdapter(wrappedTransport, system) {

   override val addedSchemeIdentifier: String = AkkaScheme
@@ -122,7 +122,7 @@ private[remote] class AkkaProtocolTransport(
 private[transport] class AkkaProtocolManager(
   private val wrappedTransport: Transport,
-  private val settings: AkkaProtocolSettings)
+  private val settings: AkkaProtocolSettings)
   extends ActorTransportAdapterManager {

   // The AkkaProtocolTransport does not handle the recovery of associations, this task is implemented in the
@@ -158,7 +158,7 @@ private[transport] class AkkaProtocolManager(
   private def createOutboundStateActor(
     remoteAddress: Address,
     statusPromise: Promise[AssociationHandle],
-    refuseUid: Option[Int]): Unit = {
+    refuseUid: Option[Int]): Unit = {

     val stateActorLocalAddress = localAddress
     val stateActorSettings = settings
@@ -181,13 +181,13 @@ private[transport] class AkkaProtocolManager(
 }

 private[remote] class AkkaProtocolHandle(
-  _localAddress: Address,
-  _remoteAddress: Address,
+  _localAddress: Address,
+  _remoteAddress: Address,
   val readHandlerPromise: Promise[HandleEventListener],
-  _wrappedHandle: AssociationHandle,
-  val handshakeInfo: HandshakeInfo,
+  _wrappedHandle: AssociationHandle,
+  val handshakeInfo: HandshakeInfo,
   private val stateActor: ActorRef,
-  private val codec: AkkaPduCodec)
+  private val codec: AkkaPduCodec)
   extends AbstractTransportAdapterHandle(_localAddress, _remoteAddress, _wrappedHandle, AkkaScheme) {

   override def write(payload: ByteString): Boolean = wrappedHandle.write(codec.constructPayload(payload))
@@ -257,34 +257,34 @@ private[transport] object ProtocolStateActor {
   case object ForbiddenUidReason

   private[remote] def outboundProps(
-    handshakeInfo: HandshakeInfo,
-    remoteAddress: Address,
-    statusPromise: Promise[AssociationHandle],
-    transport: Transport,
-    settings: AkkaProtocolSettings,
-    codec: AkkaPduCodec,
+    handshakeInfo: HandshakeInfo,
+    remoteAddress: Address,
+    statusPromise: Promise[AssociationHandle],
+    transport: Transport,
+    settings: AkkaProtocolSettings,
+    codec: AkkaPduCodec,
     failureDetector: FailureDetector,
-    refuseUid: Option[Int]): Props =
+    refuseUid: Option[Int]): Props =
     Props(classOf[ProtocolStateActor], handshakeInfo, remoteAddress, statusPromise, transport, settings, codec, failureDetector, refuseUid).withDeploy(Deploy.local)

   private[remote] def inboundProps(
-    handshakeInfo: HandshakeInfo,
-    wrappedHandle: AssociationHandle,
+    handshakeInfo: HandshakeInfo,
+    wrappedHandle: AssociationHandle,
     associationListener: AssociationEventListener,
-    settings: AkkaProtocolSettings,
-    codec: AkkaPduCodec,
-    failureDetector: FailureDetector): Props =
+    settings: AkkaProtocolSettings,
+    codec: AkkaPduCodec,
+    failureDetector: FailureDetector): Props =
     Props(classOf[ProtocolStateActor], handshakeInfo, wrappedHandle, associationListener, settings, codec, failureDetector).withDeploy(Deploy.local)
 }

-private[transport] class ProtocolStateActor(initialData: InitialProtocolStateData,
+private[transport] class ProtocolStateActor(initialData: InitialProtocolStateData,
                                             private val localHandshakeInfo: HandshakeInfo,
-                                            private val refuseUid: Option[Int],
-                                            private val settings: AkkaProtocolSettings,
-                                            private val codec: AkkaPduCodec,
-                                            private val failureDetector: FailureDetector)
+                                            private val refuseUid: Option[Int],
+                                            private val settings: AkkaProtocolSettings,
+                                            private val codec: AkkaPduCodec,
+                                            private val failureDetector: FailureDetector)
   extends Actor with FSM[AssociationState, ProtocolStateData]
   with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
@@ -292,24 +292,24 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
   import context.dispatcher

   // Outbound case
-  def this(handshakeInfo: HandshakeInfo,
-           remoteAddress: Address,
-           statusPromise: Promise[AssociationHandle],
-           transport: Transport,
-           settings: AkkaProtocolSettings,
-           codec: AkkaPduCodec,
+  def this(handshakeInfo: HandshakeInfo,
+           remoteAddress: Address,
+           statusPromise: Promise[AssociationHandle],
+           transport: Transport,
+           settings: AkkaProtocolSettings,
+           codec: AkkaPduCodec,
            failureDetector: FailureDetector,
-           refuseUid: Option[Int]) = {
+           refuseUid: Option[Int]) = {
     this(OutboundUnassociated(remoteAddress, statusPromise, transport), handshakeInfo, refuseUid, settings, codec, failureDetector)
   }

   // Inbound case
-  def this(handshakeInfo: HandshakeInfo,
-           wrappedHandle: AssociationHandle,
+  def this(handshakeInfo: HandshakeInfo,
+           wrappedHandle: AssociationHandle,
            associationListener: AssociationEventListener,
-           settings: AkkaProtocolSettings,
-           codec: AkkaPduCodec,
-           failureDetector: FailureDetector) = {
+           settings: AkkaProtocolSettings,
+           codec: AkkaPduCodec,
+           failureDetector: FailureDetector) = {
     this(InboundUnassociated(associationListener, wrappedHandle), handshakeInfo, refuseUid = None, settings, codec, failureDetector)
   }
@@ -599,8 +599,8 @@ private[transport] class ProtocolStateActor(initialData: InitialProtocolStateDat
     readHandlerPromise.future
   }

-  private def notifyInboundHandler(wrappedHandle: AssociationHandle,
-                                   handshakeInfo: HandshakeInfo,
+  private def notifyInboundHandler(wrappedHandle: AssociationHandle,
+                                   handshakeInfo: HandshakeInfo,
                                    associationListener: AssociationEventListener): Future[HandleEventListener] = {
     val readHandlerPromise = Promise[HandleEventListener]()
     listenForListenerRegistration(readHandlerPromise)
diff --git a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
index 6f367d1..9a36056 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala
@@ -77,7 +77,7 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
     case _ ⇒ wrappedTransport.managementCommand(cmd)
   }

-  protected def interceptListen(listenAddress: Address,
+  protected def interceptListen(listenAddress: Address,
                                 listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = {
     log.warning("FailureInjectorTransport is active on this system. Gremlins might munch your packets.")
     listenerFuture.onSuccess {
@@ -140,7 +140,7 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor
 /**
  * INTERNAL API
  */
-private[remote] final case class FailureInjectorHandle(_wrappedHandle: AssociationHandle,
+private[remote] final case class FailureInjectorHandle(_wrappedHandle: AssociationHandle,
                                                        private val gremlinAdapter: FailureInjectorTransportAdapter)
   extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier)
   with HandleEventListener {
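The failure injector ("gremlin") and throttler ("trttl") adapters above are stacked onto a driver transport through configuration rather than code. A hedged example; the config path is quoted from Akka's reference.conf as best I recall it, so treat it as illustrative:

    import com.typesafe.config.ConfigFactory

    // Wraps the TCP driver with the failure injector and the throttler.
    val config = ConfigFactory.parseString(
      """akka.remote.netty.tcp.applied-adapters = ["gremlin", "trttl"]""")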
diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
index 8d5de1e..746a731 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala
@@ -24,10 +24,10 @@ import scala.concurrent.ExecutionContext.Implicits.global
  * production systems.
  */
 class TestTransport(
-  val localAddress: Address,
-  final val registry: AssociationRegistry,
-  val maximumPayloadBytes: Int = 32000,
-  val schemeIdentifier: String = "test") extends Transport {
+  val localAddress: Address,
+  final val registry: AssociationRegistry,
+  val maximumPayloadBytes: Int = 32000,
+  val schemeIdentifier: String = "test") extends Transport {

   def this(system: ExtendedActorSystem, conf: Config) = {
     this(
@@ -299,7 +299,7 @@ object TestTransport {
    * @param listenerPair pair of listeners in initiator, receiver order.
    * @return
    */
-  def remoteListenerRelativeTo(handle: TestAssociationHandle,
+  def remoteListenerRelativeTo(handle: TestAssociationHandle,
                                listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = {
     listenerPair match {
       case (initiator, receiver) ⇒ if (handle.inbound) initiator else receiver
@@ -448,10 +448,10 @@ object AssociationRegistry {
 }

 final case class TestAssociationHandle(
-  localAddress: Address,
+  localAddress: Address,
   remoteAddress: Address,
-  transport: TestTransport,
-  inbound: Boolean) extends AssociationHandle {
+  transport: TestTransport,
+  inbound: Boolean) extends AssociationHandle {

   @volatile var writable = true
diff --git a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
index b24beb2..6e9d525 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala
@@ -232,7 +232,7 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) extends A
       val inMode = getInboundMode(naked)
       wrappedHandle.outboundThrottleMode.set(getOutboundMode(naked))
       wrappedHandle.readHandlerPromise.future map { ListenerAndMode(_, inMode) } pipeTo wrappedHandle.throttlerActor
-      handleTable ::= naked -> wrappedHandle
+      handleTable ::= naked → wrappedHandle
       statusPromise.success(wrappedHandle)
     case SetThrottle(address, direction, mode) ⇒
       val naked = nakedAddress(address)
@@ -259,7 +259,7 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) extends A
     case Checkin(origin, handle) ⇒
       val naked: Address = nakedAddress(origin)
-      handleTable ::= naked -> handle
+      handleTable ::= naked → handle
       setMode(naked, handle)

   }
@@ -360,10 +360,10 @@ private[transport] object ThrottledAssociation {
  * INTERNAL API
  */
 private[transport] class ThrottledAssociation(
-  val manager: ActorRef,
+  val manager: ActorRef,
   val associationHandler: AssociationEventListener,
-  val originalHandle: AssociationHandle,
-  val inbound: Boolean)
+  val originalHandle: AssociationHandle,
+  val inbound: Boolean)
   extends Actor with LoggingFSM[ThrottledAssociation.ThrottlerState, ThrottledAssociation.ThrottlerData]
   with RequiresMessageQueue[UnboundedMessageQueueSemantics] {

   import ThrottledAssociation._
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
index 2df8334..81808e9 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala
@@ -12,7 +12,7 @@ import akka.remote.transport.netty.NettyTransportSettings.{ Udp, Tcp, Mode }
 import akka.remote.transport.{ AssociationHandle, Transport }
 import akka.{ OnlyCauseStackTrace, ConfigurationException }
 import com.typesafe.config.Config
-import java.net.{ SocketAddress, InetAddress, InetSocketAddress}
+import java.net.{ SocketAddress, InetAddress, InetSocketAddress }
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.concurrent.{ ConcurrentHashMap, Executors, CancellationException }
 import org.jboss.netty.bootstrap.{ ConnectionlessBootstrap, Bootstrap, ClientBootstrap, ServerBootstrap }
@@ -22,7 +22,7 @@ import org.jboss.netty.channel.group.{ DefaultChannelGroup, ChannelGroup, Channe
 import org.jboss.netty.channel.socket.nio.{ NioWorkerPool, NioDatagramChannelFactory, NioServerSocketChannelFactory, NioClientSocketChannelFactory }
 import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender }
 import org.jboss.netty.handler.ssl.SslHandler
-import scala.concurrent.duration.{ FiniteDuration}
+import scala.concurrent.duration.{ FiniteDuration }
 import scala.concurrent.{ ExecutionContext, Promise, Future, blocking }
 import scala.util.{ Try }
 import scala.util.control.{ NoStackTrace, NonFatal }
@@ -162,9 +162,9 @@ private[netty] trait CommonHandlers extends NettyHelpers {

   protected def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle

-  protected def registerListener(channel: Channel,
-                                 listener: HandleEventListener,
-                                 msg: ChannelBuffer,
+  protected def registerListener(channel: Channel,
+                                 listener: HandleEventListener,
+                                 msg: ChannelBuffer,
                                  remoteSocketAddress: InetSocketAddress): Unit

   final protected def init(channel: Channel, remoteSocketAddress: SocketAddress, remoteAddress: Address, msg: ChannelBuffer)(
@@ -188,7 +188,7 @@ private[netty] trait CommonHandlers {
 /**
  * INTERNAL API
  */
-private[netty] abstract class ServerHandler(protected final val transport: NettyTransport,
+private[netty] abstract class ServerHandler(protected final val transport: NettyTransport,
                                             private final val associationListenerFuture: Future[AssociationEventListener])
   extends NettyServerHelpers with CommonHandlers {
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
index 787c754..bc63b55 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala
@@ -28,9 +28,9 @@ private[remote] trait TcpHandlers extends CommonHandlers {

   import ChannelLocalActor._

-  override def registerListener(channel: Channel,
-                                listener: HandleEventListener,
-                                msg: ChannelBuffer,
+  override def registerListener(channel: Channel,
+                                listener: HandleEventListener,
+                                msg: ChannelBuffer,
                                 remoteSocketAddress: InetSocketAddress): Unit = ChannelLocalActor.set(channel, Some(listener))

   override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
@@ -75,9 +75,9 @@ private[remote] class TcpClientHandler(_transport: NettyTransport, remoteAddress
 /**
  * INTERNAL API
  */
-private[remote] class TcpAssociationHandle(val localAddress: Address,
-                                           val remoteAddress: Address,
-                                           val transport: NettyTransport,
+private[remote] class TcpAssociationHandle(val localAddress: Address,
+                                           val remoteAddress: Address,
+                                           val transport: NettyTransport,
                                            private val channel: Channel) extends AssociationHandle {

   import transport.executionContext
diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
index 7e17cd7..e165555 100644
--- a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala
@@ -21,9 +21,9 @@ private[remote] trait UdpHandlers extends CommonHandlers {
   override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle =
     new UdpAssociationHandle(localAddress, remoteAddress, channel, transport)

-  override def registerListener(channel: Channel,
-                                listener: HandleEventListener,
-                                msg: ChannelBuffer,
+  override def registerListener(channel: Channel,
+                                listener: HandleEventListener,
+                                msg: ChannelBuffer,
                                 remoteSocketAddress: InetSocketAddress): Unit = {
     transport.udpConnectionTable.putIfAbsent(remoteSocketAddress, listener) match {
       case null ⇒ listener notify InboundPayload(ByteString(msg.array()))
@@ -72,9 +72,9 @@ private[remote] class UdpClientHandler(_transport: NettyTransport, remoteAddress
 /**
  * INTERNAL API
  */
-private[remote] class UdpAssociationHandle(val localAddress: Address,
-                                           val remoteAddress: Address,
-                                           private val channel: Channel,
+private[remote] class UdpAssociationHandle(val localAddress: Address,
+                                           val remoteAddress: Address,
+                                           private val channel: Channel,
                                            private val transport: NettyTransport) extends AssociationHandle {

   override val readHandlerPromise: Promise[HandleEventListener] = Promise()
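For completeness, the management command understood by the ThrottlerTransportAdapter a few files up can be built from its public case classes. How the command reaches the transport (the managementCommand call) goes through internal API, so this is a sketch only, and the TokenBucket field values are arbitrary:

    import akka.actor.Address
    import akka.remote.transport.ThrottlerTransportAdapter.{ Direction, SetThrottle, TokenBucket }

    val peer = Address("akka.tcp", "Sys", "remotehost", 2552)
    // Limit outbound traffic to the peer to roughly 100 tokens per second.
    val cmd = SetThrottle(peer, Direction.Send,
      TokenBucket(capacity = 1000, tokensPerSecond = 100.0, nanoTimeOfLastSend = 0L, availableTokens = 1000))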
diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
index 746b8cc..1847b4e 100644
--- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala
@@ -24,12 +24,12 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {
   }

   def createFailureDetector(
-    threshold: Double = 8.0,
-    maxSampleSize: Int = 1000,
-    minStdDeviation: FiniteDuration = 100.millis,
+    threshold: Double = 8.0,
+    maxSampleSize: Int = 1000,
+    minStdDeviation: FiniteDuration = 100.millis,
     acceptableLostDuration: FiniteDuration = Duration.Zero,
     firstHeartbeatEstimate: FiniteDuration = 1.second,
-    clock: Clock = FailureDetector.defaultClock) =
+    clock: Clock = FailureDetector.defaultClock) =
     new PhiAccrualFailureDetector(
       threshold,
       maxSampleSize,
@@ -64,7 +64,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") {

     "return realistic phi values" in {
       val fd = createFailureDetector()
-      val test = TreeMap(0 -> 0.0, 500 -> 0.1, 1000 -> 0.3, 1200 -> 1.6, 1400 -> 4.7, 1600 -> 10.8, 1700 -> 15.3)
+      val test = TreeMap(0 → 0.0, 500 → 0.1, 1000 → 0.3, 1200 → 1.6, 1400 → 4.7, 1600 → 10.8, 1700 → 15.3)
       for ((timeDiff, expectedPhi) ← test) {
         fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- (0.1))
       }
diff --git a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
index 0966ef0..95e015c 100644
--- a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala
@@ -24,7 +24,7 @@ class DeadlineFailureDetectorSpec extends AkkaSpec {

   def createFailureDetector(
     acceptableLostDuration: FiniteDuration,
-    clock: Clock = FailureDetector.defaultClock) =
+    clock: Clock = FailureDetector.defaultClock) =
     new DeadlineFailureDetector(acceptableLostDuration, heartbeatInterval = 1.second)(clock = clock)

   "mark node as monitored after a series of successful heartbeats" in {
diff --git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
index a620de6..e2253b6 100644
--- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala
@@ -16,12 +16,12 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") {
   }

   def createFailureDetector(
-    threshold: Double = 8.0,
-    maxSampleSize: Int = 1000,
-    minStdDeviation: FiniteDuration = 10.millis,
+    threshold: Double = 8.0,
+    maxSampleSize: Int = 1000,
+    minStdDeviation: FiniteDuration = 10.millis,
     acceptableLostDuration: FiniteDuration = Duration.Zero,
     firstHeartbeatEstimate: FiniteDuration = 1.second,
-    clock: Clock = FailureDetector.defaultClock) =
+    clock: Clock = FailureDetector.defaultClock) =
     new PhiAccrualFailureDetector(
       threshold,
       maxSampleSize,
@@ -29,12 +29,12 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") {
       acceptableLostDuration,
       firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock)

-  def createFailureDetectorRegistry(threshold: Double = 8.0,
-                                    maxSampleSize: Int = 1000,
-                                    minStdDeviation: FiniteDuration = 10.millis,
+  def createFailureDetectorRegistry(threshold: Double = 8.0,
+                                    maxSampleSize: Int = 1000,
+                                    minStdDeviation: FiniteDuration = 10.millis,
                                     acceptableLostDuration: FiniteDuration = Duration.Zero,
                                     firstHeartbeatEstimate: FiniteDuration = 1.second,
-                                    clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
+                                    clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = {
     new DefaultFailureDetectorRegistry[String](() ⇒ createFailureDetector(
       threshold,
       maxSampleSize,
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
index d6af847..7528cc1 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala
@@ -47,8 +47,8 @@ class RemoteConfigSpec extends AkkaSpec(
       Transports.head._1 should ===(classOf[akka.remote.transport.netty.NettyTransport].getName)
       Transports.head._2 should ===(Nil)
       Adapters should ===(Map(
-        "gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName,
-        "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName))
+        "gremlin" → classOf[akka.remote.transport.FailureInjectorProvider].getName,
+        "trttl" → classOf[akka.remote.transport.ThrottlerProvider].getName))

       WatchFailureDetectorImplementationClass should ===(classOf[PhiAccrualFailureDetector].getName)
       WatchHeartBeatInterval should ===(1 seconds)
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala
index c212f1e..fff9304 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala
@@ -16,7 +16,7 @@ class RemoteConsistentHashingRouterSpec extends AkkaSpec("""
   "ConsistentHashingGroup" must {

     "use same hash ring indepenent of self address" in {
-      // simulating running router on two different nodes (a1, a2) with target routees on 3 other nodes (s1, s2, s3)
+      // simulating running router on two different nodes (a1, a2) with target routees on 3 other nodes (s1, s2, s3)
       val a1 = Address("akka.tcp", "Sys", "client1", 2552)
       val a2 = Address("akka.tcp", "Sys", "client2", 2552)
       val s1 = ActorSelectionRoutee(system.actorSelection("akka.tcp://Sys@server1:2552/user/a/b"))
diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
index 540ee0b..3ce6b98 100644
--- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala
@@ -59,7 +59,7 @@ akka {
     }).withDeploy(Deploy.local))
     expectMsg(20.seconds, ref)

-    // we don't expect real quarantine when the UID is unknown, i.e. QuarantinedEvent is not published
+    // we don't expect real quarantine when the UID is unknown, i.e. QuarantinedEvent is not published
     probe.expectNoMsg(3.seconds)
     // The following verifies ticket #3870, i.e. make sure that re-delivery of Watch message is stopped.
     // It was observed as periodic logging of "address is now gated" when the gate was lifted.
diff --git a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
index ff2076f..4ab24d8 100644
--- a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
+++ b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala
@@ -141,9 +141,9 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D
   for (
     (name, proto) ← Seq(
-      "/gonk" -> "tcp",
-      "/zagzag" -> "udp",
-      "/roghtaar" -> "ssl.tcp")
+      "/gonk" → "tcp",
+      "/zagzag" → "udp",
+      "/roghtaar" → "ssl.tcp")
   ) deploy(system, Deploy(name, scope = RemoteScope(addr(remoteSystem, proto))))

   def addr(sys: ActorSystem, proto: String) =
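The deploy(...) loop in RemotingSpec above has a direct public-API counterpart: a Props can be pinned to a remote node with RemoteScope, after which actorOf instantiates the actor there. A sketch with placeholder host and system names:

    import akka.actor.{ Address, Deploy, Props }
    import akka.remote.RemoteScope

    val node = Address("akka.tcp", "Sys", "remotehost", 2552)
    def workerProps: Props = Props.empty // stand-in for a real actor's Props
    val remoteProps = workerProps.withDeploy(Deploy(scope = RemoteScope(node)))
    // system.actorOf(remoteProps, "gonk") would create /user/gonk on that node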
StringWithMDC("Message with null custom MDC values", Map("ticketNumber" → 3671, "ticketDesc" → null)) awaitCond(outputString.contains("----"), 5 seconds) val s = outputString diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala index e809752..9103044 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala @@ -17,11 +17,11 @@ object GraphStageMessages { object TestSinkStage { def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], - probe: TestProbe) = new TestSinkStage(stageUnderTest, probe) + probe: TestProbe) = new TestSinkStage(stageUnderTest, probe) } private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], - probe: TestProbe) + probe: TestProbe) extends GraphStageWithMaterializedValue[SinkShape[T], M] { val in = Inlet[T]("testSinkStage.in") @@ -52,11 +52,11 @@ private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMateria object TestSourceStage { def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], - probe: TestProbe) = Source.fromGraph(new TestSourceStage(stageUnderTest, probe)) + probe: TestProbe) = Source.fromGraph(new TestSourceStage(stageUnderTest, probe)) } private[testkit] class TestSourceStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], - probe: TestProbe) + probe: TestProbe) extends GraphStageWithMaterializedValue[SourceShape[T], M] { val out = Outlet[T]("testSourceStage.out") diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala index 17df41f..2db1b2f 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala @@ -8,18 +8,18 @@ import org.reactivestreams.Publisher import akka.stream.ActorMaterializer class ChainSetup[In, Out, M]( - stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], + stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], val settings: ActorMaterializerSettings, materializer: ActorMaterializer, - toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit val system: ActorSystem) { + toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit val system: ActorSystem) { def this(stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], settings: ActorMaterializerSettings, toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) = this(stream, settings, ActorMaterializer(settings)(system), toPublisher)(system) - def this(stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], - settings: ActorMaterializerSettings, + def this(stream: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], + settings: ActorMaterializerSettings, materializerCreator: (ActorMaterializerSettings, ActorRefFactory) ⇒ ActorMaterializer, - toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) = + toPublisher: (Source[Out, _], ActorMaterializer) ⇒ Publisher[Out])(implicit system: ActorSystem) = this(stream, settings, materializerCreator(settings, system), toPublisher)(system) val upstream = TestPublisher.manualProbe[In]() diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala 
b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala index e86a967..086ac50 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Coroner.scala @@ -79,7 +79,7 @@ object Coroner { // FIXME: remove once going back to project dependencies */ def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream, startAndStopDuration: FiniteDuration = defaultStartAndStopDuration, - displayThreadCounts: Boolean = false): WatchHandle = { + displayThreadCounts: Boolean = false): WatchHandle = { val watchedHandle = new WatchHandleImpl(startAndStopDuration) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala index 3ee5815..db2b5fe 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala @@ -40,13 +40,13 @@ trait ScriptedTest extends Matchers { } final class Script[In, Out]( - val providedInputs: Vector[In], + val providedInputs: Vector[In], val expectedOutputs: Vector[Out], - val jumps: Vector[Int], - val inputCursor: Int, - val outputCursor: Int, + val jumps: Vector[Int], + val inputCursor: Int, + val outputCursor: Int, val outputEndCursor: Int, - val completed: Boolean) { + val completed: Boolean) { require(jumps.size == providedInputs.size) def provideInput: (In, Script[In, Out]) = @@ -88,12 +88,12 @@ trait ScriptedTest extends Matchers { } class ScriptRunner[In, Out, M]( - op: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], - settings: ActorMaterializerSettings, - script: Script[In, Out], + op: Flow[In, In, NotUsed] ⇒ Flow[In, Out, M], + settings: ActorMaterializerSettings, + script: Script[In, Out], maximumOverrun: Int, maximumRequest: Int, - maximumBuffer: Int)(implicit _system: ActorSystem) + maximumBuffer: Int)(implicit _system: ActorSystem) extends ChainSetup(op, settings, toPublisher) { var _debugLog = Vector.empty[String] diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala index 730cb14..cb12f49 100644 --- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala @@ -200,16 +200,16 @@ object ActorMaterializerSettings { * Create [[ActorMaterializerSettings]] from individual settings (Scala). */ def apply( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, + initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int) = + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int) = new ActorMaterializerSettings( initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize) @@ -240,16 +240,16 @@ object ActorMaterializerSettings { * Create [[ActorMaterializerSettings]] from individual settings (Java). 
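For orientation, a hedged sketch of consuming the settings factories above from user code; the object name and buffer sizes are illustrative, not part of this patch:

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings }

object SettingsSketch extends App {
  implicit val system = ActorSystem("settings-sketch")

  // initialInputBufferSize must be > 0 and <= maxInputBufferSize,
  // matching the require checks shown just below.
  val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 16)

  implicit val materializer = ActorMaterializer(settings)
}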
*/ def create( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, + initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int) = + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int) = new ActorMaterializerSettings( initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize) @@ -273,16 +273,16 @@ object ActorMaterializerSettings { * Please refer to the `withX` methods for descriptions of the individual settings. */ final class ActorMaterializerSettings( - val initialInputBufferSize: Int, - val maxInputBufferSize: Int, - val dispatcher: String, - val supervisionDecider: Supervision.Decider, + val initialInputBufferSize: Int, + val maxInputBufferSize: Int, + val dispatcher: String, + val supervisionDecider: Supervision.Decider, val subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - val debugLogging: Boolean, - val outputBurstLimit: Int, - val fuzzingMode: Boolean, - val autoFusing: Boolean, - val maxFixedBufferSize: Int) { + val debugLogging: Boolean, + val outputBurstLimit: Int, + val fuzzingMode: Boolean, + val autoFusing: Boolean, + val maxFixedBufferSize: Int) { require(initialInputBufferSize > 0, "initialInputBufferSize must be > 0") @@ -290,16 +290,16 @@ final class ActorMaterializerSettings( require(initialInputBufferSize <= maxInputBufferSize, s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)") private def copy( - initialInputBufferSize: Int = this.initialInputBufferSize, - maxInputBufferSize: Int = this.maxInputBufferSize, - dispatcher: String = this.dispatcher, - supervisionDecider: Supervision.Decider = this.supervisionDecider, + initialInputBufferSize: Int = this.initialInputBufferSize, + maxInputBufferSize: Int = this.maxInputBufferSize, + dispatcher: String = this.dispatcher, + supervisionDecider: Supervision.Decider = this.supervisionDecider, subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings, - debugLogging: Boolean = this.debugLogging, - outputBurstLimit: Int = this.outputBurstLimit, - fuzzingMode: Boolean = this.fuzzingMode, - autoFusing: Boolean = this.autoFusing, - maxFixedBufferSize: Int = this.maxFixedBufferSize) = + debugLogging: Boolean = this.debugLogging, + outputBurstLimit: Int = this.outputBurstLimit, + fuzzingMode: Boolean = this.fuzzingMode, + autoFusing: Boolean = this.autoFusing, + maxFixedBufferSize: Int = this.maxFixedBufferSize) = new ActorMaterializerSettings( initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize) diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala index 2a76dcc..892b284 100644 --- a/akka-stream/src/main/scala/akka/stream/Attributes.scala +++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala @@ -67,14 +67,14 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { 
Optional.ofNullable(attributeList.foldLeft( null.asInstanceOf[T] )( - (acc, attr) ⇒ if (c.isInstance(attr)) c.cast(attr) else acc) + (acc, attr) ⇒ if (c.isInstance(attr)) c.cast(attr) else acc) ) /** * Java API: Get the first (least specific) attribute of a given `Class` or subclass thereof. */ def getFirstAttribute[T <: Attribute](c: Class[T]): Optional[T] = - attributeList.collectFirst { case attr if c.isInstance(attr) => c cast attr }.asJava + attributeList.collectFirst { case attr if c.isInstance(attr) ⇒ c cast attr }.asJava /** * Scala API: get all attributes of a given type (or subtypes thereof). @@ -105,7 +105,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { */ def get[T <: Attribute: ClassTag]: Option[T] = { val c = classTag[T].runtimeClass.asInstanceOf[Class[T]] - attributeList.reverseIterator.collectFirst[T] { case attr if c.isInstance(attr) => c.cast(attr) } + attributeList.reverseIterator.collectFirst[T] { case attr if c.isInstance(attr) ⇒ c.cast(attr) } } /** @@ -113,7 +113,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { */ def getFirst[T <: Attribute: ClassTag]: Option[T] = { val c = classTag[T].runtimeClass.asInstanceOf[Class[T]] - attributeList.collectFirst { case attr if c.isInstance(attr) => c.cast(attr) } + attributeList.collectFirst { case attr if c.isInstance(attr) ⇒ c.cast(attr) } } /** diff --git a/akka-stream/src/main/scala/akka/stream/Fusing.scala b/akka-stream/src/main/scala/akka/stream/Fusing.scala index d90abac..4af2774 100644 --- a/akka-stream/src/main/scala/akka/stream/Fusing.scala +++ b/akka-stream/src/main/scala/akka/stream/Fusing.scala @@ -43,7 +43,7 @@ object Fusing { * topology for convenient graph traversal. */ case class FusedGraph[+S <: Shape @uncheckedVariance, +M](override val module: FusedModule, - override val shape: S) extends Graph[S, M] { + override val shape: S) extends Graph[S, M] { // the @uncheckedVariance look like a compiler bug ... why does it work in Graph but not here? override def withAttributes(attr: Attributes) = copy(module = module.withAttributes(attr)) } @@ -55,10 +55,10 @@ object Fusing { * the wirings in a more accessible form, allowing traversal from port to upstream * or downstream port and from there to the owning module (or graph vertex). */ - final case class StructuralInfo(upstreams: immutable.Map[InPort, OutPort], + final case class StructuralInfo(upstreams: immutable.Map[InPort, OutPort], downstreams: immutable.Map[OutPort, InPort], - inOwners: immutable.Map[InPort, Module], - outOwners: immutable.Map[OutPort, Module], - allModules: Set[Module]) + inOwners: immutable.Map[InPort, Module], + outOwners: immutable.Map[OutPort, Module], + allModules: Set[Module]) } diff --git a/akka-stream/src/main/scala/akka/stream/Materializer.scala b/akka-stream/src/main/scala/akka/stream/Materializer.scala index 32d739d..5b0a6b3 100644 --- a/akka-stream/src/main/scala/akka/stream/Materializer.scala +++ b/akka-stream/src/main/scala/akka/stream/Materializer.scala @@ -78,6 +78,6 @@ private[akka] object NoMaterializer extends Materializer { * Context parameter to the `create` methods of sources and sinks. 
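The `get`/`getFirst` pair touched above differs only in which end of the attribute list wins; a small sketch (attribute names illustrative):

import akka.stream.Attributes

object AttributesSketch {
  // `and` appends, so the right-hand side is the more specific attribute.
  val attrs = Attributes.name("outer") and Attributes.name("inner")

  val mostSpecific  = attrs.get[Attributes.Name]      // Some(Name("inner")) via reverseIterator
  val leastSpecific = attrs.getFirst[Attributes.Name] // Some(Name("outer"))
}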
*/ private[akka] case class MaterializationContext( - materializer: Materializer, + materializer: Materializer, effectiveAttributes: Attributes, - stageName: String) + stageName: String) diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala index 5e9259c..23a5ebc 100644 --- a/akka-stream/src/main/scala/akka/stream/Shape.scala +++ b/akka-stream/src/main/scala/akka/stream/Shape.scala @@ -304,9 +304,9 @@ object SinkShape { * +------+ * }}} */ -final case class BidiShape[-In1, +Out1, -In2, +Out2](in1: Inlet[In1 @uncheckedVariance], +final case class BidiShape[-In1, +Out1, -In2, +Out2](in1: Inlet[In1 @uncheckedVariance], out1: Outlet[Out1 @uncheckedVariance], - in2: Inlet[In2 @uncheckedVariance], + in2: Inlet[In2 @uncheckedVariance], out2: Outlet[Out2 @uncheckedVariance]) extends Shape { //#implementation-details-elided override val inlets: immutable.Seq[Inlet[_]] = List(in1, in2) @@ -333,9 +333,9 @@ object BidiShape { BidiShape(top.in, top.out, bottom.in, bottom.out) /** Java API */ - def of[In1, Out1, In2, Out2](in1: Inlet[In1 @uncheckedVariance], + def of[In1, Out1, In2, Out2](in1: Inlet[In1 @uncheckedVariance], out1: Outlet[Out1 @uncheckedVariance], - in2: Inlet[In2 @uncheckedVariance], + in2: Inlet[In2 @uncheckedVariance], out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] = BidiShape(in1, out1, in2, out2) diff --git a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala index 944f6c3..7f02f4e 100644 --- a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala +++ b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala @@ -190,9 +190,9 @@ object TLSProtocol { */ case class NegotiateNewSession( enabledCipherSuites: Option[immutable.Seq[String]], - enabledProtocols: Option[immutable.Seq[String]], - clientAuth: Option[TLSClientAuth], - sslParameters: Option[SSLParameters]) extends SslTlsOutbound { + enabledProtocols: Option[immutable.Seq[String]], + clientAuth: Option[TLSClientAuth], + sslParameters: Option[SSLParameters]) extends SslTlsOutbound { /** * Java API: Make a copy of this message with the given `enabledCipherSuites`. 
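For reference, a hedged sketch of constructing the `NegotiateNewSession` message above with every field left at the engine defaults (object name illustrative):

import akka.stream.TLSProtocol.NegotiateNewSession

object TlsSketch {
  // None for a field keeps the SSLEngine defaults for that aspect.
  val renegotiate = NegotiateNewSession(
    enabledCipherSuites = None,
    enabledProtocols    = None,
    clientAuth          = None,
    sslParameters       = None)
}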
diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala index 0ce74e6..65c0353 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala @@ -26,12 +26,12 @@ import akka.stream.impl.fusing.GraphInterpreterShell /** * INTERNAL API */ -private[akka] case class ActorMaterializerImpl(system: ActorSystem, +private[akka] case class ActorMaterializerImpl(system: ActorSystem, override val settings: ActorMaterializerSettings, - dispatchers: Dispatchers, - supervisor: ActorRef, - haveShutDown: AtomicBoolean, - flowNames: SeqActorName) extends ActorMaterializer { + dispatchers: Dispatchers, + supervisor: ActorRef, + haveShutDown: AtomicBoolean, + flowNames: SeqActorName) extends ActorMaterializer { import akka.stream.impl.Stages._ private val _logger = Logging.getLogger(system, this) override def logger = _logger @@ -79,7 +79,7 @@ private[akka] case class ActorMaterializerImpl(system: ActorSystem, materialize(_runnableGraph, null) private[stream] def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat], - subflowFuser: GraphInterpreterShell ⇒ ActorRef): Mat = { + subflowFuser: GraphInterpreterShell ⇒ ActorRef): Mat = { val runnableGraph = if (settings.autoFusing) Fusing.aggressive(_runnableGraph) else _runnableGraph @@ -175,14 +175,14 @@ private[akka] case class ActorMaterializerImpl(system: ActorSystem, } // FIXME: Remove this, only stream-of-stream ops need it - private def processorFor(op: StageModule, + private def processorFor(op: StageModule, effectiveAttributes: Attributes, - effectiveSettings: ActorMaterializerSettings): (Processor[Any, Any], Any) = op match { + effectiveSettings: ActorMaterializerSettings): (Processor[Any, Any], Any) = op match { case DirectProcessor(processorFactory, _) ⇒ processorFactory() case _ ⇒ val (opprops, mat) = ActorProcessorFactory.props(ActorMaterializerImpl.this, op, effectiveAttributes) ActorProcessorFactory[Any, Any]( - actorOf(opprops, stageName(effectiveAttributes), effectiveSettings.dispatcher)) -> mat + actorOf(opprops, stageName(effectiveAttributes), effectiveSettings.dispatcher)) → mat } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala index 2060317..50f90da 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala @@ -15,9 +15,9 @@ import akka.stream.stage._ * INTERNAL API */ private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef, onInitMessage: Any, - ackMessage: Any, + ackMessage: Any, onCompleteMessage: Any, - onFailureMessage: (Throwable) ⇒ Any) + onFailureMessage: (Throwable) ⇒ Any) extends GraphStage[SinkShape[In]] { val in: Inlet[In] = Inlet[In]("ActorRefBackpressureSink.in") override def initialAttributes = DefaultAttributes.actorRefWithAck diff --git a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala index 9061de4..53cb12e 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala @@ -47,7 +47,7 @@ private[akka] final case class ErrorPublisher(t: Throwable, name: String) extend */ 
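A hedged usage sketch for the backpressured actor sink above; it assumes the stage is surfaced through `Sink.actorRefWithAck`, and the protocol messages are placeholders:

import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.Sink

object AckProtocol {
  case object Init
  case object Ack
  case object Complete

  // The receiving actor must reply with Ack after Init and after each element;
  // withholding the Ack is what backpressures the stream.
  def ackSink(receiver: ActorRef): Sink[String, NotUsed] =
    Sink.actorRefWithAck[String](receiver,
      onInitMessage     = Init,
      ackMessage        = Ack,
      onCompleteMessage = Complete)
}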
private[akka] final case class MaybePublisher[T]( promise: Promise[Option[T]], - name: String)(implicit ec: ExecutionContext) extends Publisher[T] { + name: String)(implicit ec: ExecutionContext) extends Publisher[T] { import ReactiveStreamsCompliance._ private[this] class MaybeSubscription(subscriber: Subscriber[_ >: T]) extends Subscription { diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala index 3fe6993..67c0ec2 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala @@ -7,10 +7,10 @@ import org.reactivestreams.Subscriber /** * INTERNAL API */ -private[akka] abstract class FanoutOutputs(val maxBufferSize: Int, +private[akka] abstract class FanoutOutputs(val maxBufferSize: Int, val initialBufferSize: Int, - self: ActorRef, - val pump: Pump) + self: ActorRef, + val pump: Pump) extends DefaultOutputTransferStates with SubscriberManagement[Any] { @@ -115,7 +115,7 @@ private[akka] class FanoutProcessorImpl(_settings: ActorMaterializerSettings) log.debug("fail due to: {}", e.getMessage) primaryInputs.cancel() primaryOutputs.error(e) - // Stopping will happen after flush + // Stopping will happen after flush } override def pumpFinished(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala index a073ffc..3de502e 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala @@ -14,7 +14,7 @@ import ResizableMultiReaderRingBuffer._ * elements, rather, if full, the buffer tries to grow and rejects further writes if max capacity is reached. 
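The grow-don't-overwrite behaviour described above relies on a power-of-two capacity; a one-liner showing the bit trick used by the `require` just below:

object PowerOfTwoSketch {
  // Integer.lowestOneBit(n) == n exactly when n has a single set bit, i.e. when n is
  // a power of two; such capacities let wrap-around indexing use a simple bit mask.
  def isPowerOfTwo(n: Int): Boolean = n > 0 && Integer.lowestOneBit(n) == n

  assert(isPowerOfTwo(1024))
  assert(!isPowerOfTwo(1000))
}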
*/ private[akka] class ResizableMultiReaderRingBuffer[T](initialSize: Int, // constructor param, not field - maxSize: Int, // constructor param, not field + maxSize: Int, // constructor param, not field val cursors: Cursors) { require(Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2, "maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala index f7fdac9..22cca99 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala @@ -73,7 +73,7 @@ private[akka] class PublisherSink[In](val attributes: Attributes, shape: SinkSha */ private[akka] final class FanoutPublisherSink[In]( val attributes: Attributes, - shape: SinkShape[In]) + shape: SinkShape[In]) extends SinkModule[In, Publisher[In]](shape) { override def create(context: MaterializationContext): (Subscriber[In], Publisher[In]) = { @@ -156,7 +156,7 @@ private[akka] final class ActorSubscriberSink[In](props: Props, val attributes: */ private[akka] final class ActorRefSink[In](ref: ActorRef, onCompleteMessage: Any, val attributes: Attributes, - shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) { + shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) { override def create(context: MaterializationContext) = { val actorMaterializer = ActorMaterializer.downcast(context.materializer) diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala index 3c4e5ea..48954df 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala @@ -338,9 +338,9 @@ object StreamLayout { override def materializedValueComputation: MaterializedValueNode = Ignore } - final case class CopiedModule(override val shape: Shape, + final case class CopiedModule(override val shape: Shape, override val attributes: Attributes, - copyOf: Module) extends Module { + copyOf: Module) extends Module { override val subModules: Set[Module] = Set(copyOf) override def withAttributes(attr: Attributes): Module = @@ -363,12 +363,12 @@ object StreamLayout { } final case class CompositeModule( - override val subModules: Set[Module], - override val shape: Shape, - override val downstreams: Map[OutPort, InPort], - override val upstreams: Map[InPort, OutPort], + override val subModules: Set[Module], + override val shape: Shape, + override val downstreams: Map[OutPort, InPort], + override val upstreams: Map[InPort, OutPort], override val materializedValueComputation: MaterializedValueNode, - override val attributes: Attributes) extends Module { + override val attributes: Attributes) extends Module { override def replaceShape(s: Shape): Module = if (s != shape) { @@ -395,13 +395,13 @@ object StreamLayout { } final case class FusedModule( - override val subModules: Set[Module], - override val shape: Shape, - override val downstreams: Map[OutPort, InPort], - override val upstreams: Map[InPort, OutPort], + override val subModules: Set[Module], + override val shape: Shape, + override val downstreams: Map[OutPort, InPort], + override val upstreams: Map[InPort, OutPort], override val materializedValueComputation: MaterializedValueNode, - override val attributes: Attributes, - info: Fusing.StructuralInfo) extends Module { + override val attributes: Attributes, + info: 
Fusing.StructuralInfo) extends Module { override def isFused: Boolean = true diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala index 61de42a..d22b688 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala @@ -14,9 +14,9 @@ object SubFlowImpl { } } -class SubFlowImpl[In, Out, Mat, F[+_], C](val subFlow: Flow[In, Out, NotUsed], +class SubFlowImpl[In, Out, Mat, F[+_], C](val subFlow: Flow[In, Out, NotUsed], mergeBackFunction: SubFlowImpl.MergeBack[In, F], - finishFunction: Sink[In, NotUsed] ⇒ C) + finishFunction: Sink[In, NotUsed] ⇒ C) extends SubFlow[Out, Mat, F, C] { override def deprecatedAndThen[U](op: Stages.StageModule): SubFlow[U, Mat, F, C] = diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala index e9ea2fc..4b4c0b4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala @@ -13,11 +13,11 @@ import scala.concurrent.duration.{ FiniteDuration, _ } /** * INTERNAL API */ -private[stream] class Throttle[T](cost: Int, - per: FiniteDuration, - maximumBurst: Int, +private[stream] class Throttle[T](cost: Int, + per: FiniteDuration, + maximumBurst: Int, costCalculation: (T) ⇒ Int, - mode: ThrottleMode) + mode: ThrottleMode) extends SimpleLinearGraphStage[T] { require(cost > 0, "cost must be > 0") require(per.toMillis > 0, "per time must be > 0") diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala index 61f6170..408c2c2 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala @@ -303,13 +303,13 @@ private[stream] object ActorGraphInterpreter { * INTERNAL API */ private[stream] final class GraphInterpreterShell( - assembly: GraphAssembly, - inHandlers: Array[InHandler], + assembly: GraphAssembly, + inHandlers: Array[InHandler], outHandlers: Array[OutHandler], - logics: Array[GraphStageLogic], - shape: Shape, - settings: ActorMaterializerSettings, - val mat: ActorMaterializerImpl) { + logics: Array[GraphStageLogic], + shape: Shape, + settings: ActorMaterializerSettings, + val mat: ActorMaterializerImpl) { import ActorGraphInterpreter._ diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala index 3486703..26aba3b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Fusing.scala @@ -241,11 +241,11 @@ private[stream] object Fusing { * correspondence is then used during materialization to trigger these sources * when “their” node has received its value. 
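A hedged sketch of the `Throttle` stage above as reached through the public `throttle` combinator; the rates are illustrative:

import scala.concurrent.duration._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

object ThrottleSketch {
  // At most 2 elements per second with bursts of up to 10; Shaping delays
  // elements instead of failing the stream when the rate is exceeded.
  val throttled = Source(1 to 100)
    .throttle(elements = 2, per = 1.second, maximumBurst = 10, mode = ThrottleMode.Shaping)
}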
*/ - private def descend(m: Module, + private def descend(m: Module, inheritedAttributes: Attributes, - struct: BuildStructuralInfo, - openGroup: ju.Set[Module], - indent: Int): List[(Module, MaterializedValueNode)] = { + struct: BuildStructuralInfo, + openGroup: ju.Set[Module], + indent: Int): List[(Module, MaterializedValueNode)] = { def log(msg: String): Unit = println(" " * indent + msg) val async = m match { case _: GraphStageModule ⇒ m.attributes.contains(AsyncBoundary) @@ -323,14 +323,14 @@ private[stream] object Fusing { var result = List.empty[(Module, MaterializedValueNode)] var i = 0 while (i < mvids.length) { - result ::= mvids(i) -> Atomic(newids(i)) + result ::= mvids(i) → Atomic(newids(i)) i += 1 } - result ::= m -> Atomic(newgm) + result ::= m → Atomic(newgm) result case _ ⇒ if (Debug) log(s"atomic module $m") - List(m -> struct.addModule(m, localGroup, inheritedAttributes, indent)) + List(m → struct.addModule(m, localGroup, inheritedAttributes, indent)) } } else { val attributes = inheritedAttributes and m.attributes @@ -338,7 +338,7 @@ private[stream] object Fusing { case CopiedModule(shape, _, copyOf) ⇒ val ret = descend(copyOf, attributes, struct, localGroup, indent + 1) match { - case xs @ (_, mat) :: _ ⇒ (m -> mat) :: xs + case xs @ (_, mat) :: _ ⇒ (m → mat) :: xs case _ ⇒ throw new IllegalArgumentException("cannot happen") } struct.rewire(copyOf.shape, shape, indent) @@ -387,7 +387,7 @@ private[stream] object Fusing { struct.replace(c, replacement, localGroup) } // the result for each level is the materialized value computation - List(m -> newMat) + List(m → newMat) } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala index c141708..21e1bc0 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala @@ -99,12 +99,12 @@ private[akka] object GraphInterpreter { * corresponding segments of these arrays matches the exact same order of the ports in the [[Shape]]. 
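For context, the `descend` machinery above runs when a graph is fused explicitly; a sketch using `Fusing.aggressive`, the same entry point the materializer uses under `autoFusing` (see ActorMaterializerImpl earlier in this patch):

import akka.stream.Fusing
import akka.stream.scaladsl.Flow

object FusingSketch {
  // Fuse once, materialize many times.
  val flow  = Flow[Int].map(_ * 2).filter(_ % 3 != 0)
  val fused = Fusing.aggressive(flow)
}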
* */ - final class GraphAssembly(val stages: Array[GraphStageWithMaterializedValue[Shape, Any]], + final class GraphAssembly(val stages: Array[GraphStageWithMaterializedValue[Shape, Any]], val originalAttributes: Array[Attributes], - val ins: Array[Inlet[_]], - val inOwners: Array[Int], - val outs: Array[Outlet[_]], - val outOwners: Array[Int]) { + val ins: Array[Inlet[_]], + val inOwners: Array[Int], + val outs: Array[Outlet[_]], + val outOwners: Array[Int]) { require(ins.length == inOwners.length && inOwners.length == outs.length && outs.length == outOwners.length) def connectionCount: Int = ins.length @@ -120,9 +120,9 @@ private[akka] object GraphInterpreter { * - materialized value */ def materialize(inheritedAttributes: Attributes, - copiedModules: Array[Module], - matVal: ju.Map[Module, Any], - register: MaterializedValueSource[Any] ⇒ Unit): (Array[InHandler], Array[OutHandler], Array[GraphStageLogic]) = { + copiedModules: Array[Module], + matVal: ju.Map[Module, Any], + register: MaterializedValueSource[Any] ⇒ Unit): (Array[InHandler], Array[OutHandler], Array[GraphStageLogic]) = { val logics = Array.ofDim[GraphStageLogic](stages.length) var i = 0 @@ -205,9 +205,9 @@ private[akka] object GraphInterpreter { /** * INTERNAL API */ - final def apply(inlets: immutable.Seq[Inlet[_]], + final def apply(inlets: immutable.Seq[Inlet[_]], outlets: immutable.Seq[Outlet[_]], - stages: GraphStageWithMaterializedValue[Shape, _]*): GraphAssembly = { + stages: GraphStageWithMaterializedValue[Shape, _]*): GraphAssembly = { // add the contents of an iterator to an array starting at idx @tailrec def add[T](i: Iterator[T], a: Array[T], idx: Int): Array[T] = if (i.hasNext) { @@ -342,13 +342,13 @@ private[akka] object GraphInterpreter { */ private[stream] final class GraphInterpreter( private val assembly: GraphInterpreter.GraphAssembly, - val materializer: Materializer, - val log: LoggingAdapter, - val inHandlers: Array[InHandler], // Lookup table for the InHandler of a connection - val outHandlers: Array[OutHandler], // Lookup table for the outHandler of the connection - val logics: Array[GraphStageLogic], // Array of stage logics - val onAsyncInput: (GraphStageLogic, Any, (Any) ⇒ Unit) ⇒ Unit, - val fuzzingMode: Boolean) { + val materializer: Materializer, + val log: LoggingAdapter, + val inHandlers: Array[InHandler], // Lookup table for the InHandler of a connection + val outHandlers: Array[OutHandler], // Lookup table for the outHandler of the connection + val logics: Array[GraphStageLogic], // Array of stage logics + val onAsyncInput: (GraphStageLogic, Any, (Any) ⇒ Unit) ⇒ Unit, + val fuzzingMode: Boolean) { import GraphInterpreter._ // Maintains additional information for events, basically elements in-flight, or failure. 
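To ground the interpreter internals above, a minimal user-level stage of the kind it executes; a sketch, not part of this patch:

import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }

final class IdentityStage[T] extends GraphStage[FlowShape[T, T]] {
  val in  = Inlet[T]("Identity.in")
  val out = Outlet[T]("Identity.out")
  override val shape = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      setHandler(in, new InHandler {
        override def onPush(): Unit = push(out, grab(in)) // forward each element
      })
      setHandler(out, new OutHandler {
        override def onPull(): Unit = pull(in) // propagate demand upstream
      })
    }
}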
diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala index 6c942db..bdda4b5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala @@ -22,9 +22,9 @@ import scala.util.Try /** * INTERNAL API */ -private[akka] final case class GraphStageModule(shape: Shape, +private[akka] final case class GraphStageModule(shape: Shape, attributes: Attributes, - stage: GraphStageWithMaterializedValue[Shape, Any]) extends Module { + stage: GraphStageWithMaterializedValue[Shape, Any]) extends Module { override def carbonCopy: Module = CopiedModule(shape.deepCopy(), Attributes.none, this) override def replaceShape(s: Shape): Module = diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala index 4dd67c4..9f2303b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala @@ -369,22 +369,22 @@ private[akka] final case class Buffer[T](size: Int, overflowStrategy: OverflowSt if (buffer.isFull) buffer.dropHead() buffer.enqueue(elem) ctx.pull() - case DropTail ⇒ (ctx, elem) ⇒ + case DropTail ⇒ (ctx, elem) ⇒ if (buffer.isFull) buffer.dropTail() buffer.enqueue(elem) ctx.pull() - case DropBuffer ⇒ (ctx, elem) ⇒ + case DropBuffer ⇒ (ctx, elem) ⇒ if (buffer.isFull) buffer.clear() buffer.enqueue(elem) ctx.pull() - case DropNew ⇒ (ctx, elem) ⇒ + case DropNew ⇒ (ctx, elem) ⇒ if (!buffer.isFull) buffer.enqueue(elem) ctx.pull() - case Backpressure ⇒ (ctx, elem) ⇒ + case Backpressure ⇒ (ctx, elem) ⇒ buffer.enqueue(elem) if (buffer.isFull) ctx.holdUpstream() else ctx.pull() - case Fail ⇒ (ctx, elem) ⇒ + case Fail ⇒ (ctx, elem) ⇒ if (buffer.isFull) ctx.fail(new BufferOverflowException(s"Buffer overflow (max capacity was: $size)!")) else { buffer.enqueue(elem) @@ -654,7 +654,7 @@ private[akka] final case class MapAsync[In, Out](parallelism: Int, f: In ⇒ Fut val future = f(grab(in)) val holder = new Holder[Try[Out]](NotYetThere) buffer.enqueue(holder) - future.onComplete(result ⇒ futureCB.invoke(holder -> result))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) + future.onComplete(result ⇒ futureCB.invoke(holder → result))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) } catch { case NonFatal(ex) ⇒ if (decider(ex) == Supervision.Stop) failStage(ex) @@ -750,7 +750,7 @@ private[akka] final case class MapAsyncUnordered[In, Out](parallelism: Int, f: I */ private[akka] final case class Log[T](name: String, extract: T ⇒ Any, logAdapter: Option[LoggingAdapter], - decider: Supervision.Decider) extends PushStage[T, T] { + decider: Supervision.Decider) extends PushStage[T, T] { import Log._ diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala index 45d83b4..130f7f5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala @@ -82,9 +82,9 @@ private[akka] object ByteStringParser { * @param acceptUpstreamFinish - if true - stream will complete when received `onUpstreamFinish`, if "false" * - onTruncation will be called */ - case class ParseResult[+T](result: Option[T], - nextStep: ParseStep[T], - acceptUpstreamFinish: Boolean = true) + case class 
ParseResult[+T](result: Option[T], + nextStep: ParseStep[T], + acceptUpstreamFinish: Boolean = true) trait ParseStep[+T] { /** diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala index 93b0eed..1191971 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala @@ -91,8 +91,8 @@ final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) ex * InputStreamAdapter that interacts with InputStreamSinkStage */ private[akka] class InputStreamAdapter(sharedBuffer: BlockingQueue[StreamToAdapterMessage], - sendToStage: (AdapterToStageMessage) ⇒ Unit, - readTimeout: FiniteDuration) + sendToStage: (AdapterToStageMessage) ⇒ Unit, + readTimeout: FiniteDuration) extends InputStream { var isInitialized = false diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala index c964f73..00a1d55 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala @@ -119,10 +119,10 @@ final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration } } -private[akka] class OutputStreamAdapter(dataQueue: BlockingQueue[ByteString], +private[akka] class OutputStreamAdapter(dataQueue: BlockingQueue[ByteString], downstreamStatus: AtomicReference[DownstreamStatus], - sendToStage: (AdapterToStageMessage) ⇒ Future[Unit], - writeTimeout: FiniteDuration) + sendToStage: (AdapterToStageMessage) ⇒ Future[Unit], + writeTimeout: FiniteDuration) extends OutputStream { var isActive = true diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala index 07fdfdf..ee18712 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala @@ -23,13 +23,13 @@ import akka.stream.TLSProtocol._ */ private[akka] object TLSActor { - def props(settings: ActorMaterializerSettings, - sslContext: SSLContext, + def props(settings: ActorMaterializerSettings, + sslContext: SSLContext, firstSession: NegotiateNewSession, - role: TLSRole, - closing: TLSClosing, - hostInfo: Option[(String, Int)], - tracing: Boolean = false): Props = + role: TLSRole, + closing: TLSClosing, + hostInfo: Option[(String, Int)], + tracing: Boolean = false): Props = Props(new TLSActor(settings, sslContext, firstSession, role, closing, hostInfo, tracing)).withDeploy(Deploy.local) final val TransportIn = 0 @@ -42,8 +42,8 @@ private[akka] object TLSActor { /** * INTERNAL API. 
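The overflow branches matched in Ops.scala above map one-to-one onto the public `OverflowStrategy` factories; a brief sketch (buffer size arbitrary):

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source

object BufferSketch {
  // Keep at most 64 elements buffered; when full, drop the oldest element.
  val bounded = Source(1 to 1000).buffer(64, OverflowStrategy.dropHead)
}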
*/ -private[akka] class TLSActor(settings: ActorMaterializerSettings, - sslContext: SSLContext, +private[akka] class TLSActor(settings: ActorMaterializerSettings, + sslContext: SSLContext, firstSession: NegotiateNewSession, role: TLSRole, closing: TLSClosing, hostInfo: Option[(String, Int)], tracing: Boolean) extends Actor with ActorLogging with Pump { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala index 48b2b28..c63c982 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala @@ -26,12 +26,12 @@ import scala.concurrent.{ Future, Promise } /** * INTERNAL API */ -private[stream] class ConnectionSourceStage(val tcpManager: ActorRef, - val endpoint: InetSocketAddress, - val backlog: Int, - val options: immutable.Traversable[SocketOption], - val halfClose: Boolean, - val idleTimeout: Duration, +private[stream] class ConnectionSourceStage(val tcpManager: ActorRef, + val endpoint: InetSocketAddress, + val backlog: Int, + val options: immutable.Traversable[SocketOption], + val halfClose: Boolean, + val idleTimeout: Duration, val bindShutdownTimeout: FiniteDuration) extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] { import ConnectionSourceStage._ @@ -154,10 +154,10 @@ private[stream] object TcpConnectionStage { def halfClose: Boolean } case class Outbound( - manager: ActorRef, - connectCmd: Connect, + manager: ActorRef, + connectCmd: Connect, localAddressPromise: Promise[InetSocketAddress], - halfClose: Boolean) extends TcpRole + halfClose: Boolean) extends TcpRole case class Inbound(connection: ActorRef, halfClose: Boolean) extends TcpRole /* @@ -312,12 +312,12 @@ private[stream] class IncomingConnectionStage(connection: ActorRef, remoteAddres /** * INTERNAL API */ -private[stream] class OutgoingConnectionStage(manager: ActorRef, - remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = true, - connectTimeout: Duration = Duration.Inf) +private[stream] class OutgoingConnectionStage(manager: ActorRef, + remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = true, + connectTimeout: Duration = Duration.Inf) extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[StreamTcp.OutgoingConnection]] { import TcpConnectionStage._ diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala index 84bcb03..db5a120 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala @@ -13,9 +13,9 @@ import akka.util.ByteString private[akka] final case class TlsModule(plainIn: Inlet[SslTlsOutbound], plainOut: Outlet[SslTlsInbound], cipherIn: Inlet[ByteString], cipherOut: Outlet[ByteString], shape: Shape, attributes: Attributes, - sslContext: SSLContext, + sslContext: SSLContext, firstSession: NegotiateNewSession, - role: TLSRole, closing: TLSClosing, hostInfo: Option[(String, Int)]) extends Module { + role: TLSRole, closing: TLSClosing, hostInfo: Option[(String, Int)]) extends Module { override def subModules: Set[Module] = Set.empty override def withAttributes(att: Attributes): 
Module = copy(attributes = att) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala index 716d8b1..88ba60b 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala @@ -46,8 +46,8 @@ object BidiFlow { * */ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( - flow1: Graph[FlowShape[I1, O1], M1], - flow2: Graph[FlowShape[I2, O2], M2], + flow1: Graph[FlowShape[I1, O1], M1], + flow2: Graph[FlowShape[I2, O2], M2], combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = { new BidiFlow(scaladsl.BidiFlow.fromFlowsMat(flow1, flow2)(combinerToScala(combine))) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala index e1ca98f..eb1927d 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala @@ -1400,8 +1400,8 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * @see [[#merge]] */ - def mergeMat[T >: Out, M, M2](that: Graph[SourceShape[T], M], - matF: function.Function2[Mat, M, M2], + def mergeMat[T >: Out, M, M2](that: Graph[SourceShape[T], M], + matF: function.Function2[Mat, M, M2], eagerComplete: Boolean): javadsl.Flow[In, T, M2] = new Flow(delegate.mergeMat(that)(combinerToScala(matF))) @@ -1458,7 +1458,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends def zipMat[T, M, M2](that: Graph[SourceShape[T], M], matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out @uncheckedVariance Pair T, M2] = this.viaMat(Flow.fromGraph(GraphDSL.create(that, - new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out @ uncheckedVariance Pair T]] { + new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out @uncheckedVariance Pair T]] { def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out @uncheckedVariance Pair T] = { val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) b.from(s).toInlet(zip.in1) @@ -1478,7 +1478,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] = new Flow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) @@ -1488,9 +1488,9 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends * * @see [[#zipWith]] */ - def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], combine: function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = new Flow(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala index c3002bf..a9e7b77 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala @@ -66,8 +66,8 @@ object Framing { * this Flow will fail the stream. 
This length *includes* the header (i.e the offset and * the length of the size field) */ - def lengthField(fieldLength: Int, - fieldOffset: Int, + def lengthField(fieldLength: Int, + fieldOffset: Int, maximumFrameLength: Int): Flow[ByteString, ByteString, NotUsed] = scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength).asJava @@ -85,10 +85,10 @@ object Framing { * the length of the size field) * @param byteOrder The ''ByteOrder'' to be used when decoding the field */ - def lengthField(fieldLength: Int, - fieldOffset: Int, + def lengthField(fieldLength: Int, + fieldOffset: Int, maximumFrameLength: Int, - byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] = + byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] = scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength, byteOrder).asJava /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala index 8afcee3..83994e8 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala @@ -671,7 +671,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] = new Source(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) @@ -681,9 +681,9 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap * * @see [[#zipWith]]. */ - def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], combine: function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = new Source(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala index 3ee680a..7143fbb 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala @@ -1039,7 +1039,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] = new SubFlow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala index e966b9a..ff88efc 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala @@ -1038,7 +1038,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] = new SubSource(delegate.zipWith[Out2, 
Out3](that)(combinerToScala(combine))) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala index d05c4a8..046b2ea 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala @@ -120,11 +120,11 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently whether the client is still attempting to write. This setting is recommended * for servers, and therefore it is the default setting. */ - def bind(interface: String, - port: Int, - backlog: Int, - options: JIterable[SocketOption], - halfClose: Boolean, + def bind(interface: String, + port: Int, + backlog: Int, + options: JIterable[SocketOption], + halfClose: Boolean, idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] = Source.fromGraph(delegate.bind(interface, port, backlog, immutableSeq(options), halfClose, idleTimeout) .map(new IncomingConnection(_)) @@ -159,12 +159,12 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * If set to false, the connection will immediately closed once the client closes its write side, * independently whether the server is still attempting to write. */ - def outgoingConnection(remoteAddress: InetSocketAddress, - localAddress: Optional[InetSocketAddress], - options: JIterable[SocketOption], - halfClose: Boolean, + def outgoingConnection(remoteAddress: InetSocketAddress, + localAddress: Optional[InetSocketAddress], + options: JIterable[SocketOption], + halfClose: Boolean, connectTimeout: Duration, - idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = + idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = Flow.fromGraph(delegate.outgoingConnection(remoteAddress, localAddress.asScala, immutableSeq(options), halfClose, connectTimeout, idleTimeout) .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala index 7e195c0..bd5673f 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala @@ -194,8 +194,7 @@ object BidiFlow { def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( flow1: Graph[FlowShape[I1, O1], M1], flow2: Graph[FlowShape[I2, O2], M2])(combine: (M1, M2) ⇒ M): BidiFlow[I1, O1, I2, O2, M] = - fromGraph(GraphDSL.create(flow1, flow2)(combine) { - implicit b ⇒ (f1, f2) ⇒ BidiShape(f1.in, f1.out, f2.in, f2.out) + fromGraph(GraphDSL.create(flow1, flow2)(combine) { implicit b ⇒ (f1, f2) ⇒ BidiShape(f1.in, f1.out, f2.in, f2.out) }) /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala index 595aa5c..51a5d47 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala @@ -26,7 +26,7 @@ import akka.NotUsed * A `Flow` is a set of stream processing steps that has one open input and one open output. 
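A usage sketch for the length-field framing documented above; note that `maximumFrameLength` includes the header, as the scaladoc states (all numbers illustrative):

import java.nio.ByteOrder
import akka.NotUsed
import akka.stream.scaladsl.{ Flow, Framing }
import akka.util.ByteString

object FramingSketch {
  // 4-byte length field at offset 0, frames capped at 64 KiB including the header.
  val framed: Flow[ByteString, ByteString, NotUsed] =
    Framing.lengthField(fieldLength = 4, fieldOffset = 0,
      maximumFrameLength = 65536, byteOrder = ByteOrder.BIG_ENDIAN)
}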
*/ final class Flow[-In, +Out, +Mat](private[stream] override val module: Module) - extends FlowOpsMat[Out, Mat] with Graph[FlowShape[In, Out], Mat] { + extends FlowOpsMat[Out, Mat] with Graph[FlowShape[In, Out], Mat] { override val shape: FlowShape[In, Out] = module.shape.asInstanceOf[FlowShape[In, Out]] @@ -410,23 +410,23 @@ trait FlowOps[+Out, +Mat] { def recover[T >: Out](pf: PartialFunction[Throwable, T]): Repr[T] = andThen(Recover(pf)) /** - * RecoverWith allows to switch to alternative Source on flow failure. It will stay in effect after - * a failure has been recovered so that each time there is a failure it is fed into the `pf` and a new - * Source may be materialized. - * - * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements. - * This stage can recover the failure signal, but not the skipped elements, which will be dropped. - * - * '''Emits when''' element is available from the upstream or upstream is failed and element is available - * from alternative Source - * - * '''Backpressures when''' downstream backpressures - * - * '''Completes when''' upstream completes or upstream failed with exception pf can handle - * - * '''Cancels when''' downstream cancels - * - */ + * RecoverWith allows to switch to alternative Source on flow failure. It will stay in effect after + * a failure has been recovered so that each time there is a failure it is fed into the `pf` and a new + * Source may be materialized. + * + * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements. + * This stage can recover the failure signal, but not the skipped elements, which will be dropped. + * + * '''Emits when''' element is available from the upstream or upstream is failed and element is available + * from alternative Source + * + * '''Backpressures when''' downstream backpressures + * + * '''Completes when''' upstream completes or upstream failed with exception pf can handle + * + * '''Cancels when''' downstream cancels + * + */ def recoverWith[T >: Out](pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] = via(new RecoverWith(pf)) @@ -463,30 +463,30 @@ trait FlowOps[+Out, +Mat] { * '''Cancels when''' downstream cancels * */ - def mapConcat[T](f: Out ⇒ immutable.Iterable[T]): Repr[T] = statefulMapConcat(() => f) - - /** - * Transform each input element into an `Iterable` of output elements that is - * then flattened into the output stream. The transformation is meant to be stateful, - * which is enabled by creating the transformation function anew for every materialization — - * the returned function will typically close over mutable objects to store state between - * invocations. For the stateless variant see [[FlowOps.mapConcat]]. - * - * The returned `Iterable` MUST NOT contain `null` values, - * as they are illegal as stream elements - according to the Reactive Streams specification. 
- * - * '''Emits when''' the mapping function returns an element or there are still remaining elements - * from the previously calculated collection - * - * '''Backpressures when''' downstream backpressures or there are still remaining elements from the - * previously calculated collection - * - * '''Completes when''' upstream completes and all remaining elements has been emitted - * - * '''Cancels when''' downstream cancels - * - * See also [[FlowOps.mapConcat]] - */ + def mapConcat[T](f: Out ⇒ immutable.Iterable[T]): Repr[T] = statefulMapConcat(() ⇒ f) + + /** + * Transform each input element into an `Iterable` of output elements that is + * then flattened into the output stream. The transformation is meant to be stateful, + * which is enabled by creating the transformation function anew for every materialization — + * the returned function will typically close over mutable objects to store state between + * invocations. For the stateless variant see [[FlowOps.mapConcat]]. + * + * The returned `Iterable` MUST NOT contain `null` values, + * as they are illegal as stream elements - according to the Reactive Streams specification. + * + * '''Emits when''' the mapping function returns an element or there are still remaining elements + * from the previously calculated collection + * + * '''Backpressures when''' downstream backpressures or there are still remaining elements from the + * previously calculated collection + * + * '''Completes when''' upstream completes and all remaining elements has been emitted + * + * '''Cancels when''' downstream cancels + * + * See also [[FlowOps.mapConcat]] + */ def statefulMapConcat[T](f: () ⇒ Out ⇒ immutable.Iterable[T]): Repr[T] = via(new StatefulMapConcat(f)) @@ -993,7 +993,7 @@ trait FlowOps[+Out, +Mat] { * * See also [[FlowOps.conflate]], [[FlowOps.limit]], [[FlowOps.limitWeighted]] [[FlowOps.batch]] [[FlowOps.batchWeighted]] */ - def conflate[O2 >: Out](aggregate: (O2, O2) => O2): Repr[O2] = conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate) + def conflate[O2 >: Out](aggregate: (O2, O2) ⇒ O2): Repr[O2] = conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate) /** * Allows a faster upstream to progress independently of a slower subscriber by aggregating elements into batches @@ -1539,11 +1539,10 @@ trait FlowOps[+Out, +Mat] { def zip[U](that: Graph[SourceShape[U], _]): Repr[(Out, U)] = via(zipGraph(that)) protected def zipGraph[U, M](that: Graph[SourceShape[U], M]): Graph[FlowShape[Out @uncheckedVariance, (Out, U)], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val zip = b.add(Zip[Out, U]()) - r ~> zip.in1 - FlowShape(zip.in0, zip.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val zip = b.add(Zip[Out, U]()) + r ~> zip.in1 + FlowShape(zip.in0, zip.out) } /** @@ -1562,11 +1561,10 @@ trait FlowOps[+Out, +Mat] { via(zipWithGraph(that)(combine)) protected def zipWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])(combine: (Out, Out2) ⇒ Out3): Graph[FlowShape[Out @uncheckedVariance, Out3], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val zip = b.add(ZipWith[Out, Out2, Out3](combine)) - r ~> zip.in1 - FlowShape(zip.in0, zip.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val zip = b.add(ZipWith[Out, Out2, Out3](combine)) + r ~> zip.in1 + FlowShape(zip.in0, zip.out) } /** @@ -1595,13 +1593,12 @@ trait FlowOps[+Out, +Mat] { def interleave[U >: Out](that: Graph[SourceShape[U], _], segmentSize: Int): Repr[U] = via(interleaveGraph(that, segmentSize)) - protected def interleaveGraph[U >: Out, M](that: 
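Two quick sketches for the operators whose scaladoc is re-indented above; inputs and fallbacks are illustrative:

import akka.stream.scaladsl.Source

object OperatorSketches {
  // recoverWith: on failure, switch to an alternative Source; it stays in effect,
  // so every failure is fed to the partial function anew.
  val resilient = Source(1 to 10)
    .map(n ⇒ if (n == 5) throw new RuntimeException("boom") else n)
    .recoverWith { case _: RuntimeException ⇒ Source(List(-1)) }

  // statefulMapConcat: the outer factory runs once per materialization, so the
  // counter below is fresh mutable state for every run (pairs elements with indices).
  val indexed = Source(List("a", "b", "c")).statefulMapConcat { () ⇒
    var i = -1
    elem ⇒ { i += 1; List(i → elem) }
  }
}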
Graph[SourceShape[U], M], + protected def interleaveGraph[U >: Out, M](that: Graph[SourceShape[U], M], segmentSize: Int): Graph[FlowShape[Out @uncheckedVariance, U], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val interleave = b.add(Interleave[U](2, segmentSize)) - r ~> interleave.in(1) - FlowShape(interleave.in(0), interleave.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val interleave = b.add(Interleave[U](2, segmentSize)) + r ~> interleave.in(1) + FlowShape(interleave.in(0), interleave.out) } /** @@ -1620,11 +1617,10 @@ trait FlowOps[+Out, +Mat] { via(mergeGraph(that, eagerComplete)) protected def mergeGraph[U >: Out, M](that: Graph[SourceShape[U], M], eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val merge = b.add(Merge[U](2, eagerComplete)) - r ~> merge.in(1) - FlowShape(merge.in(0), merge.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val merge = b.add(Merge[U](2, eagerComplete)) + r ~> merge.in(1) + FlowShape(merge.in(0), merge.out) } /** @@ -1646,11 +1642,10 @@ trait FlowOps[+Out, +Mat] { via(mergeSortedGraph(that)) protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])(implicit ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val merge = b.add(new MergeSorted[U]) - r ~> merge.in1 - FlowShape(merge.in0, merge.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val merge = b.add(new MergeSorted[U]) + r ~> merge.in1 + FlowShape(merge.in0, merge.out) } /** @@ -1675,11 +1670,10 @@ trait FlowOps[+Out, +Mat] { via(concatGraph(that)) protected def concatGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val merge = b.add(Concat[U]()) - r ~> merge.in(1) - FlowShape(merge.in(0), merge.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val merge = b.add(Concat[U]()) + r ~> merge.in(1) + FlowShape(merge.in(0), merge.out) } /** @@ -1704,11 +1698,10 @@ trait FlowOps[+Out, +Mat] { via(prependGraph(that)) protected def prependGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - val merge = b.add(Concat[U]()) - r ~> merge.in(0) - FlowShape(merge.in(1), merge.out) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + val merge = b.add(Concat[U]()) + r ~> merge.in(0) + FlowShape(merge.in(1), merge.out) } /** @@ -1754,12 +1747,11 @@ trait FlowOps[+Out, +Mat] { def alsoTo(that: Graph[SinkShape[Out], _]): Repr[Out] = via(alsoToGraph(that)) protected def alsoToGraph[M](that: Graph[SinkShape[Out], M]): Graph[FlowShape[Out @uncheckedVariance, Out], M] = - GraphDSL.create(that) { implicit b ⇒ - r ⇒ - import GraphDSL.Implicits._ - val bcast = b.add(Broadcast[Out](2)) - bcast.out(1) ~> r - FlowShape(bcast.in, bcast.out(0)) + GraphDSL.create(that) { implicit b ⇒ r ⇒ + import GraphDSL.Implicits._ + val bcast = b.add(Broadcast[Out](2)) + bcast.out(1) ~> r + FlowShape(bcast.in, bcast.out(0)) } def withAttributes(attr: Attributes): Repr[Out] diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala index b92a07b..929c6f8 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala @@ -46,10 +46,10 @@ object Framing { * the length of the size field) * @param byteOrder The ''ByteOrder'' 
to be used when decoding the field */ - def lengthField(fieldLength: Int, - fieldOffset: Int = 0, + def lengthField(fieldLength: Int, + fieldOffset: Int = 0, maximumFrameLength: Int, - byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = { + byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = { require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.") Flow[ByteString].transform(() ⇒ new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder)) .named("lengthFieldFraming") @@ -179,10 +179,10 @@ object Framing { } private final class LengthFieldFramingStage( - val lengthFieldLength: Int, - val lengthFieldOffset: Int, + val lengthFieldLength: Int, + val lengthFieldOffset: Int, val maximumFrameLength: Int, - val byteOrder: ByteOrder) extends PushPullStage[ByteString, ByteString] { + val byteOrder: ByteOrder) extends PushPullStage[ByteString, ByteString] { private var buffer = ByteString.empty private var frameSize = Int.MaxValue private val minimumChunkSize = lengthFieldOffset + lengthFieldLength diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala index aa478f4..2a78f2f 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala @@ -8,7 +8,7 @@ import akka.stream._ import akka.stream.impl._ import akka.stream.impl.fusing.GraphStages import akka.stream.impl.fusing.GraphStages.MaterializedValueSource -import akka.stream.impl.Stages.{ DefaultAttributes, StageModule} +import akka.stream.impl.Stages.{ DefaultAttributes, StageModule } import akka.stream.impl.StreamLayout._ import akka.stream.scaladsl.Partition.PartitionOutOfBoundsException import akka.stream.stage.{ OutHandler, InHandler, GraphStageLogic, GraphStage } @@ -993,7 +993,7 @@ object GraphDSL extends GraphApply { } private class PortOpsImpl[+Out](override val outlet: Outlet[Out @uncheckedVariance], b: Builder[_]) - extends PortOps[Out] { + extends PortOps[Out] { override def withAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported override def addAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala index dc03455..260928d 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala @@ -31,9 +31,9 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { * Represents an accepted incoming TCP connection. */ final case class IncomingConnection( - localAddress: InetSocketAddress, + localAddress: InetSocketAddress, remoteAddress: InetSocketAddress, - flow: Flow[ByteString, ByteString, NotUsed]) { + flow: Flow[ByteString, ByteString, NotUsed]) { /** * Handles the connection using the given flow, which is materialized exactly once and the respective @@ -87,12 +87,12 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently of whether the client is still attempting to write. This setting is recommended * for servers, and therefore it is the default setting.
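// A minimal usage sketch for the Framing.lengthField API reformatted above; this
// snippet is not part of the patch. Because maximumFrameLength has no default
// value but follows defaulted parameters, it must be passed by name whenever the
// defaults of fieldOffset or byteOrder are relied on. The sizes and the byte
// order below are illustrative choices, not values taken from the source.
import java.nio.ByteOrder
import akka.NotUsed
import akka.stream.scaladsl.{ Flow, Framing }
import akka.util.ByteString

val framing: Flow[ByteString, ByteString, NotUsed] =
  Framing.lengthField(
    fieldLength = 4,                  // length prefix is four bytes wide
    fieldOffset = 0,                  // prefix sits at the start of each frame
    maximumFrameLength = 1024 * 1024, // fail the stream on frames above 1 MiB
    byteOrder = ByteOrder.BIG_ENDIAN)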
*/ - def bind(interface: String, - port: Int, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = false, - idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = + def bind(interface: String, + port: Int, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = false, + idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = Source.fromGraph(new ConnectionSourceStage( IO(IoTcp)(system), new InetSocketAddress(interface, port), @@ -126,13 +126,13 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * for servers, and therefore it is the default setting. */ def bindAndHandle( - handler: Flow[ByteString, ByteString, _], - interface: String, - port: Int, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = false, - idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { + handler: Flow[ByteString, ByteString, _], + interface: String, + port: Int, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = false, + idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { bind(interface, port, backlog, options, halfClose, idleTimeout).to(Sink.foreach { conn: IncomingConnection ⇒ conn.flow.join(handler).run() }).run() @@ -154,12 +154,12 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * If set to false, the connection will be immediately closed once the client closes its write side, * independently of whether the server is still attempting to write. */ - def outgoingConnection(remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = true, - connectTimeout: Duration = Duration.Inf, - idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { + def outgoingConnection(remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = true, + connectTimeout: Duration = Duration.Inf, + idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { val tcpFlow = Flow.fromGraph(new OutgoingConnectionStage( IO(IoTcp)(system), diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala index 496a9a7..98397f6 100644 --- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala +++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala @@ -11,7 +11,7 @@ import akka.japi.function.{ Effect, Procedure } import akka.stream._ import akka.stream.impl.StreamLayout.Module import akka.stream.impl.fusing.{ GraphInterpreter, GraphStageModule, SubSource, SubSink } -import akka.stream.impl.{ ReactiveStreamsCompliance} +import akka.stream.impl.{ ReactiveStreamsCompliance } import scala.collection.{ immutable, mutable } import scala.concurrent.duration.FiniteDuration import akka.stream.actor.ActorSubscriberMessage @@ -122,9 +122,9 @@ object GraphStageLogic { /** * Minimal actor to work with other actors and watch them in a synchronous way */ - final class StageActor(materializer: ActorMaterializer, + final class StageActor(materializer: ActorMaterializer, getAsyncCallback: StageActorRef.Receive ⇒
AsyncCallback[(ActorRef, Any)], - initialReceive: StageActorRef.Receive) { + initialReceive: StageActorRef.Receive) { private val callback = getAsyncCallback(internalReceive) private def cell = materializer.supervisor match { @@ -1148,9 +1148,9 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap * adding the new timer. */ final protected def schedulePeriodicallyWithInitialDelay( - timerKey: Any, + timerKey: Any, initialDelay: FiniteDuration, - interval: FiniteDuration): Unit = { + interval: FiniteDuration): Unit = { cancelTimer(timerKey) val id = timerIdGen.next() val task = interpreter.materializer.schedulePeriodically(initialDelay, interval, new Runnable { diff --git a/akka-stream/src/main/scala/akka/stream/stage/Stage.scala b/akka-stream/src/main/scala/akka/stream/stage/Stage.scala index 4a2983b..6c9503d 100644 --- a/akka-stream/src/main/scala/akka/stream/stage/Stage.scala +++ b/akka-stream/src/main/scala/akka/stream/stage/Stage.scala @@ -38,8 +38,8 @@ private[stream] object AbstractStage { private class PushPullGraphLogic[In, Out]( private val shape: FlowShape[In, Out], - val attributes: Attributes, - val stage: AbstractStage[In, Out, Directive, Directive, Context[Out], LifecycleContext]) + val attributes: Attributes, + val stage: AbstractStage[In, Out, Directive, Directive, Context[Out], LifecycleContext]) extends GraphStageLogic(shape) with DetachedContext[Out] { final override def materializer: Materializer = interpreter.materializer @@ -163,7 +163,7 @@ private[stream] object AbstractStage { } class PushPullGraphStageWithMaterializedValue[-In, +Out, Ext, +Mat]( - val factory: (Attributes) ⇒ (Stage[In, Out], Mat), + val factory: (Attributes) ⇒ (Stage[In, Out], Mat), stageAttributes: Attributes) extends GraphStageWithMaterializedValue[FlowShape[In, Out], Mat] { diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index a27a795..3167b4f 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -52,16 +52,16 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { queues = (Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]] /: queues) { case (m, (k, v)) ⇒ val nv = v filter (_.get ne null) - if (nv.isEmpty) m else m += (k -> nv) + if (nv.isEmpty) m else m += (k → nv) }.result } protected[akka] def registerQueue(mbox: CallingThreadMailbox, q: MessageQueue): Unit = synchronized { if (queues contains mbox) { val newSet = queues(mbox) + new WeakReference(q) - queues += mbox -> newSet + queues += mbox → newSet } else { - queues += mbox -> Set(new WeakReference(q)) + queues += mbox → Set(new WeakReference(q)) } val now = System.nanoTime if (now - lastGC > 1000000000l) { diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index aa63c9d..f297153 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -19,10 +19,10 @@ import akka.pattern.ask * @since 1.1 */ class TestActorRef[T <: Actor]( - _system: ActorSystem, - _props: Props, + _system: ActorSystem, + _props: Props, _supervisor: ActorRef, - name: String) + name: String) extends { val props = _props.withDispatcher( diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala 
b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala index 9e5b441..87dba9a 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala @@ -271,9 +271,9 @@ object EventFilter { * If you want to match all Error events, the most efficient is to use Left(""). */ final case class ErrorFilter( - throwable: Class[_], - override val source: Option[String], - override val message: Either[String, Regex], + throwable: Class[_], + override val source: Option[String], + override val message: Either[String, Regex], override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { def matches(event: LogEvent) = { @@ -323,8 +323,8 @@ final case class ErrorFilter( * If you want to match all Warning events, the most efficient is to use Left(""). */ final case class WarningFilter( - override val source: Option[String], - override val message: Either[String, Regex], + override val source: Option[String], + override val message: Either[String, Regex], override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { def matches(event: LogEvent) = { @@ -366,8 +366,8 @@ final case class WarningFilter( * If you want to match all Info events, the most efficient is to use Left(""). */ final case class InfoFilter( - override val source: Option[String], - override val message: Either[String, Regex], + override val source: Option[String], + override val message: Either[String, Regex], override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { def matches(event: LogEvent) = { @@ -409,8 +409,8 @@ final case class InfoFilter( * If you want to match all Debug events, the most efficient is to use Left(""). */ final case class DebugFilter( - override val source: Option[String], - override val message: Either[String, Regex], + override val source: Option[String], + override val message: Either[String, Regex], override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { def matches(event: LogEvent) = { diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index 36be493..72bee30 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -33,10 +33,10 @@ import scala.reflect.ClassTag * @since 1.2 */ class TestFSMRef[S, D, T <: Actor]( - system: ActorSystem, - props: Props, + system: ActorSystem, + props: Props, supervisor: ActorRef, - name: String)(implicit ev: T <:< FSM[S, D]) + name: String)(implicit ev: T <:< FSM[S, D]) extends TestActorRef[T](system, props, supervisor, name) { private def fsm: T = underlyingActor diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index a6fcc2f..8e6ddae 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -679,9 +679,9 @@ trait TestKitBase { * * If verifySystemShutdown is true, then an exception will be thrown on failure. 
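// A minimal, self-contained sketch of the event filters reformatted above; not
// part of this patch. A real test would normally emit the warning through an
// actor's LoggingAdapter; here a Warning log event is published directly so the
// snippet stands alone. The system name and message text are invented.
import akka.actor.ActorSystem
import akka.event.Logging
import akka.testkit.EventFilter
import com.typesafe.config.ConfigFactory

implicit val system: ActorSystem = ActorSystem("filter-demo",
  ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))

// Expect exactly one Warning whose message starts with the given prefix;
// intercept fails the enclosing test if the event never arrives.
EventFilter.warning(start = "unhandled", occurrences = 1) intercept {
  system.eventStream.publish(Logging.Warning("demo", classOf[String], "unhandled demo warning"))
}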
*/ - def shutdown(actorSystem: ActorSystem = system, - duration: Duration = 5.seconds.dilated.min(10.seconds), - verifySystemShutdown: Boolean = false) { + def shutdown(actorSystem: ActorSystem = system, + duration: Duration = 5.seconds.dilated.min(10.seconds), + verifySystemShutdown: Boolean = false) { TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown) } @@ -771,9 +771,9 @@ object TestKit { * * If verifySystemShutdown is true, then an exception will be thrown on failure. */ - def shutdownActorSystem(actorSystem: ActorSystem, - duration: Duration = 10.seconds, - verifySystemShutdown: Boolean = false): Unit = { + def shutdownActorSystem(actorSystem: ActorSystem, + duration: Duration = 10.seconds, + verifySystemShutdown: Boolean = false): Unit = { actorSystem.terminate() try Await.ready(actorSystem.whenTerminated, duration) catch { case _: TimeoutException ⇒ diff --git a/akka-testkit/src/main/scala/akka/testkit/package.scala b/akka-testkit/src/main/scala/akka/testkit/package.scala index 46bd5ea..c073e59 100644 --- a/akka-testkit/src/main/scala/akka/testkit/package.scala +++ b/akka-testkit/src/main/scala/akka/testkit/package.scala @@ -3,7 +3,6 @@ */ package akka - import akka.actor.ActorSystem import scala.concurrent.duration.{ Duration, FiniteDuration } import scala.reflect.ClassTag diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala index e470d7c..b065558 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala @@ -35,8 +35,8 @@ class AkkaSpecSpec extends WordSpec with Matchers { // verbose config just for demonstration purposes, please leave in in case of debugging import scala.collection.JavaConverters._ val conf = Map( - "akka.actor.debug.lifecycle" -> true, "akka.actor.debug.event-stream" -> true, - "akka.loglevel" -> "DEBUG", "akka.stdout-loglevel" -> "DEBUG") + "akka.actor.debug.lifecycle" → true, "akka.actor.debug.event-stream" → true, + "akka.loglevel" → "DEBUG", "akka.stdout-loglevel" → "DEBUG") val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf)) var refs = Seq.empty[ActorRef] val spec = new AkkaSpec(system) { refs = Seq(testActor, system.actorOf(Props.empty, "name")) } diff --git a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala index 9edabd8..1ce81c9 100644 --- a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala +++ b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala @@ -78,7 +78,7 @@ object Coroner { */ def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream, startAndStopDuration: FiniteDuration = defaultStartAndStopDuration, - displayThreadCounts: Boolean = false): WatchHandle = { + displayThreadCounts: Boolean = false): WatchHandle = { val watchedHandle = new WatchHandleImpl(startAndStopDuration) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala index a56bcf3..15c2279 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala @@ -4,7 +4,7 @@ import scala.concurrent.duration._ import org.scalatest.exceptions.TestFailedException @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TestTimeSpec extends AkkaSpec(Map("akka.test.timefactor" -> 2.0)) { +class TestTimeSpec 
extends AkkaSpec(Map("akka.test.timefactor" → 2.0)) { "A TestKit" must { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala index 7ecf45d..3d28cda 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala @@ -18,15 +18,15 @@ private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = Manageme override def getMetrics: util.Map[String, Metric] = { Map[String, Metric]( - name("file-descriptors", "open") -> new Gauge[Long] { + name("file-descriptors", "open") → new Gauge[Long] { override def getValue: Long = invoke("getOpenFileDescriptorCount") }, - name("file-descriptors", "max") -> new Gauge[Long] { + name("file-descriptors", "max") → new Gauge[Long] { override def getValue: Long = invoke("getMaxFileDescriptorCount") }, - name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava + name("file-descriptors", "ratio") → new FileDescriptorRatioGauge(os)).asJava } private def invoke(name: String): Long = { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala index 8c7e983..bff7639 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala @@ -16,9 +16,9 @@ import org.{ HdrHistogram ⇒ hdr } * integer between 0 and 5. */ private[akka] class HdrHistogram( - highestTrackableValue: Long, + highestTrackableValue: Long, numberOfSignificantValueDigits: Int, - val unit: String = "") + val unit: String = "") extends Metric { private val hist = new hdr.Histogram(highestTrackableValue, numberOfSignificantValueDigits) diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala index 97d2109..d172e49 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala @@ -208,7 +208,7 @@ trait AkkaMetricRegistry { for { (key, metric) ← getMetrics.asScala if clazz.isInstance(metric) - } yield key -> metric.asInstanceOf[T] + } yield key → metric.asInstanceOf[T] } private[akka] class MetricsKitSettings(config: Config) { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala index 016e864..6f46358 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala @@ -91,6 +91,6 @@ private[metrics] trait MetricsPrefix extends MetricSet { abstract override def getMetrics: util.Map[String, Metric] = { // does not have to be fast, is only called once during registering registry import collection.JavaConverters._ - (super.getMetrics.asScala.map { case (k, v) ⇒ (prefix / k).toString -> v }).asJava + (super.getMetrics.asScala.map { case (k, v) ⇒ (prefix / k).toString → v }).asJava } } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala index da56d17..69d0e77 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala +++ 
b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala @@ -15,8 +15,8 @@ import scala.reflect.ClassTag */ class AkkaConsoleReporter( registry: AkkaMetricRegistry, - verbose: Boolean, - output: PrintStream = System.out) + verbose: Boolean, + output: PrintStream = System.out) extends ScheduledReporter(registry.asInstanceOf[MetricRegistry], "akka-console-reporter", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.NANOSECONDS) { private final val ConsoleWidth = 80 diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala index 8571112..b0c6793 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaGraphiteReporter.scala @@ -16,9 +16,9 @@ import scala.concurrent.duration._ */ class AkkaGraphiteReporter( registry: AkkaMetricRegistry, - prefix: String, + prefix: String, graphite: GraphiteClient, - verbose: Boolean = false) + verbose: Boolean = false) extends ScheduledReporter(registry.asInstanceOf[MetricRegistry], "akka-graphite-reporter", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.NANOSECONDS) { // todo get rid of ScheduledReporter (would mean removing codahale metrics)? diff --git a/akka-typed/src/main/scala/akka/typed/ActorContext.scala b/akka-typed/src/main/scala/akka/typed/ActorContext.scala index 4592407..4bcf0ca 100644 --- a/akka-typed/src/main/scala/akka/typed/ActorContext.scala +++ b/akka-typed/src/main/scala/akka/typed/ActorContext.scala @@ -155,9 +155,9 @@ trait ActorContext[T] { * See [[EffectfulActorContext]] for more advanced uses. */ class StubbedActorContext[T]( - val name: String, + val name: String, override val props: Props[T])( - override implicit val system: ActorSystem[Nothing]) extends ActorContext[T] { + override implicit val system: ActorSystem[Nothing]) extends ActorContext[T] { val inbox = Inbox.sync[T](name) override val self = inbox.ref @@ -169,7 +169,7 @@ class StubbedActorContext[T]( override def child(name: String): Option[ActorRef[Nothing]] = _children get name map (_.ref) override def spawnAnonymous[U](props: Props[U]): ActorRef[U] = { val i = Inbox.sync[U](childName.next()) - _children += i.ref.untypedRef.path.name -> i + _children += i.ref.untypedRef.path.name → i i.ref } override def spawn[U](props: Props[U], name: String): ActorRef[U] = @@ -177,12 +177,12 @@ class StubbedActorContext[T]( case Some(_) ⇒ throw new untyped.InvalidActorNameException(s"actor name $name is already taken") case None ⇒ val i = Inbox.sync[U](name) - _children += name -> i + _children += name → i i.ref } override def actorOf(props: untyped.Props): untyped.ActorRef = { val i = Inbox.sync[Any](childName.next()) - _children += i.ref.untypedRef.path.name -> i + _children += i.ref.untypedRef.path.name → i i.ref.untypedRef } override def actorOf(props: untyped.Props, name: String): untyped.ActorRef = @@ -190,7 +190,7 @@ class StubbedActorContext[T]( case Some(_) ⇒ throw new untyped.InvalidActorNameException(s"actor name $name is already taken") case None ⇒ val i = Inbox.sync[Any](name) - _children += name -> i + _children += name → i i.ref.untypedRef } override def stop(child: ActorRef[Nothing]): Boolean = { @@ -219,13 +219,13 @@ class StubbedActorContext[T]( /* * TODO - * + * * Currently running a behavior requires that the context stays the same, since * the behavior may well close over it and thus a change might not be 
effective * at all. Another issue is that there is genuine state within the context that * is coupled to the behavior’s state: if child actors were created then * migrating a behavior into a new context will not work. - * + * * This note is about remembering the reasons behind this restriction and * proposes an ActorContextProxy as a (broken) half-solution. Another avenue * by which a solution may be explored is for Pure behaviors in that they diff --git a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala index 0357243..2d338f2 100644 --- a/akka-typed/src/main/scala/akka/typed/ActorSystem.scala +++ b/akka-typed/src/main/scala/akka/typed/ActorSystem.scala @@ -127,8 +127,8 @@ object ActorSystem { private class Wrapper(val untyped: ExtendedActorSystem) extends ActorSystem[Nothing](untyped.name) with ScalaActorRef[Nothing] def apply[T](name: String, guardianProps: Props[T], - config: Option[Config] = None, - classLoader: Option[ClassLoader] = None, + config: Option[Config] = None, + classLoader: Option[ClassLoader] = None, executionContext: Option[ExecutionContext] = None): ActorSystem[T] = { val cl = classLoader.getOrElse(akka.actor.ActorSystem.findClassLoader()) val appConfig = config.getOrElse(ConfigFactory.load(cl)) diff --git a/akka-typed/src/main/scala/akka/typed/Behavior.scala b/akka-typed/src/main/scala/akka/typed/Behavior.scala index b68f3ee..c592535 100644 --- a/akka-typed/src/main/scala/akka/typed/Behavior.scala +++ b/akka-typed/src/main/scala/akka/typed/Behavior.scala @@ -3,7 +3,6 @@ */ package akka.typed - /** * The behavior of an actor defines how it reacts to the messages that it * receives. The message may either be of the type that the Actor declares @@ -58,7 +57,7 @@ abstract class Behavior[T] { /* * FIXME - * + * * Closing over ActorContext makes a Behavior immobile: it cannot be moved to * another context and executed there, and therefore it cannot be replicated or * forked either. diff --git a/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala b/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala index 708cfdc..ec00faa 100644 --- a/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala +++ b/akka-typed/src/test/scala/akka/typed/ActorContextSpec.scala @@ -191,10 +191,10 @@ class ActorContextSpec extends TypedSpec(ConfigFactory.parseString( * The latter is very useful in order to avoid disturbances with GotSignal(PostStop) in * test procedures that stop this child. */ - def mkChild(name: Option[String], + def mkChild(name: Option[String], monitor: ActorRef[Event], - self: ActorRef[Event], - inert: Boolean = false): StepWise.Steps[Event, (ActorRef[Command], ActorRef[Command])] = { + self: ActorRef[Event], + inert: Boolean = false): StepWise.Steps[Event, (ActorRef[Command], ActorRef[Command])] = { val s = startWith.keep { subj ⇒ subj ! 
MkChild(name, monitor, self) diff --git a/akka-typed/src/test/scala/akka/typed/StepWise.scala b/akka-typed/src/test/scala/akka/typed/StepWise.scala index 91074a3..7c61407 100644 --- a/akka-typed/src/test/scala/akka/typed/StepWise.scala +++ b/akka-typed/src/test/scala/akka/typed/StepWise.scala @@ -91,7 +91,7 @@ object StepWise { copy(ops = MultiMessage(timeout, count, (msgs, value) ⇒ { f.asInstanceOf[(Seq[Any], Any) ⇒ Any](msgs, value); value }, getTrace()) :: ops) def expectFailureKeep(timeout: FiniteDuration)(f: (Failed, U) ⇒ Failed.Decision): Steps[T, U] = - copy(ops = Failure(timeout, (failed, value) ⇒ f.asInstanceOf[(Failed, Any) ⇒ Failed.Decision](failed, value) -> value, getTrace()) :: ops) + copy(ops = Failure(timeout, (failed, value) ⇒ f.asInstanceOf[(Failed, Any) ⇒ Failed.Decision](failed, value) → value, getTrace()) :: ops) def expectTerminationKeep(timeout: FiniteDuration)(f: (Terminated, U) ⇒ Unit): Steps[T, U] = copy(ops = Termination(timeout, (t, value) ⇒ { f.asInstanceOf[(Terminated, Any) ⇒ Any](t, value); value }, getTrace()) :: ops) diff --git a/akka-typed/src/test/scala/akka/typed/TypedSpec.scala b/akka-typed/src/test/scala/akka/typed/TypedSpec.scala index 36ef58f..5feca93 100644 --- a/akka-typed/src/test/scala/akka/typed/TypedSpec.scala +++ b/akka-typed/src/test/scala/akka/typed/TypedSpec.scala @@ -64,11 +64,11 @@ class TypedSpec(config: Config) extends Spec with Matchers with BeforeAndAfterAl } def muteExpectedException[T <: Exception: ClassTag]( - message: String = null, - source: String = null, - start: String = "", - pattern: String = null, - occurrences: Int = Int.MaxValue): EventFilter = { + message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = { val filter = EventFilter(message, source, start, pattern, occurrences) system.eventStream.publish(Mute(filter)) filter
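// A minimal usage sketch for muteExpectedException above; hypothetical test code
// that assumes it runs inside a TypedSpec subclass (so `muteExpectedException`
// and `system` are in scope), not part of this patch. The muted exception type
// and the unmute step are illustrative; `occurrences = 1` stops muting after
// one matching event, mirroring the Mute publication in the method body above.
import akka.testkit.TestEvent

val filter = muteExpectedException[ArithmeticException](occurrences = 1)
// ... exercise the code path that is expected to throw ...
system.eventStream.publish(TestEvent.UnMute(filter))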