From 4d0e37594cb223b091ba2ec126eabe89ac8c13f8 Mon Sep 17 00:00:00 2001 From: Ivan Topolnjak Date: Sun, 28 Dec 2014 07:59:59 +0100 Subject: ! core,akka: separate all akka instrumentation into it's own kamon-akka module, related to #136. All Akka-related instrumentation and code has been moved to the kamon-akka module, including the filters for actor, dispatcher and router metrics. Also the following changes are included: - Router Metrics are now working properly, related to #139. - Cleanup the log output for this module, related to #142. - Some minor cleanups in various tests. This PR breaks the reporting modules which will need to wait for #141 to be ready to come back to life. --- kamon-akka/src/main/resources/META-INF/aop.xml | 34 +++ kamon-akka/src/main/resources/reference.conf | 51 ++++ .../src/main/scala/kamon/akka/ActorMetrics.scala | 94 +++++++ .../src/main/scala/kamon/akka/AkkaExtension.scala | 32 +++ .../main/scala/kamon/akka/DispatcherMetrics.scala | 94 +++++++ .../src/main/scala/kamon/akka/RouterMetrics.scala | 89 +++++++ .../akka/ActorCellInstrumentation.scala | 213 +++++++++++++++ .../akka/ActorLoggingInstrumentation.scala | 50 ++++ .../akka/ActorSystemMessageInstrumentation.scala | 80 ++++++ .../akka/AskPatternInstrumentation.scala | 83 ++++++ .../akka/DispatcherInstrumentation.scala | 164 ++++++++++++ kamon-akka/src/test/resources/logback.xml | 14 + .../akka/ActorCellInstrumentationSpec.scala | 125 +++++++++ .../akka/ActorLoggingInstrumentationSpec.scala | 71 +++++ .../ActorSystemMessageInstrumentationSpec.scala | 176 ++++++++++++ .../akka/AskPatternInstrumentationSpec.scala | 134 ++++++++++ .../test/scala/kamon/metric/ActorMetricsSpec.scala | 228 ++++++++++++++++ .../scala/kamon/metric/DispatcherMetricsSpec.scala | 110 ++++++++ .../scala/kamon/metric/RouterMetricsSpec.scala | 296 +++++++++++++++++++++ 19 files changed, 2138 insertions(+) create mode 100644 kamon-akka/src/main/resources/META-INF/aop.xml create mode 100644 
kamon-akka/src/main/resources/reference.conf create mode 100644 kamon-akka/src/main/scala/kamon/akka/ActorMetrics.scala create mode 100644 kamon-akka/src/main/scala/kamon/akka/AkkaExtension.scala create mode 100644 kamon-akka/src/main/scala/kamon/akka/DispatcherMetrics.scala create mode 100644 kamon-akka/src/main/scala/kamon/akka/RouterMetrics.scala create mode 100644 kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorCellInstrumentation.scala create mode 100644 kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorLoggingInstrumentation.scala create mode 100644 kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentation.scala create mode 100644 kamon-akka/src/main/scala/kamon/instrumentation/akka/AskPatternInstrumentation.scala create mode 100644 kamon-akka/src/main/scala/kamon/instrumentation/akka/DispatcherInstrumentation.scala create mode 100644 kamon-akka/src/test/resources/logback.xml create mode 100644 kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorCellInstrumentationSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorLoggingInstrumentationSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentationSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/instrumentation/akka/AskPatternInstrumentationSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/metric/ActorMetricsSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/metric/DispatcherMetricsSpec.scala create mode 100644 kamon-akka/src/test/scala/kamon/metric/RouterMetricsSpec.scala (limited to 'kamon-akka') diff --git a/kamon-akka/src/main/resources/META-INF/aop.xml b/kamon-akka/src/main/resources/META-INF/aop.xml new file mode 100644 index 00000000..46e63f91 --- /dev/null +++ b/kamon-akka/src/main/resources/META-INF/aop.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff 
--git a/kamon-akka/src/main/resources/reference.conf b/kamon-akka/src/main/resources/reference.conf new file mode 100644 index 00000000..4f742ee6 --- /dev/null +++ b/kamon-akka/src/main/resources/reference.conf @@ -0,0 +1,51 @@ +# ================================== # +# Kamon-Akka Reference Configuration # +# ================================== # + +kamon { + akka { + # If ask-pattern-timeout-warning is enabled, a WARN level log message will be generated if a future generated by the `ask` + # pattern fails with a `AskTimeoutException` and the log message will contain information depending of the strategy selected. + # strategies: + # - off: nothing to do. + # - lightweight: logs the warning when a timeout is reached using org.aspectj.lang.reflect.SourceLocation. + # - heavyweight: logs the warning when a timeout is reached using a stack trace captured at the moment the future was created. + ask-pattern-timeout-warning = off + + # Default dispatcher for all akka module operations + dispatcher = ${kamon.default-dispatcher} + } + + metrics.precision { + actor { + processing-time = ${kamon.metrics.precision.default-histogram-precision} + time-in-mailbox = ${kamon.metrics.precision.default-histogram-precision} + mailbox-size = ${kamon.metrics.precision.default-min-max-counter-precision} + } + + router { + routing-time = ${kamon.metrics.precision.default-histogram-precision} + processing-time = ${kamon.metrics.precision.default-histogram-precision} + time-in-mailbox = ${kamon.metrics.precision.default-histogram-precision} + } + + dispatcher { + maximum-pool-size { + highest-trackable-value = 999999999 + significant-value-digits = 2 + } + running-thread-count { + highest-trackable-value = 999999999 + significant-value-digits = 2 + } + queued-task-count { + highest-trackable-value = 999999999 + significant-value-digits = 2 + } + pool-size { + highest-trackable-value = 999999999 + significant-value-digits = 2 + } + } + } +} \ No newline at end of file diff --git 
a/kamon-akka/src/main/scala/kamon/akka/ActorMetrics.scala b/kamon-akka/src/main/scala/kamon/akka/ActorMetrics.scala new file mode 100644 index 00000000..b22f7fa9 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/akka/ActorMetrics.scala @@ -0,0 +1,94 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.akka + +import akka.actor.ActorSystem +import com.typesafe.config.Config +import kamon.metric._ +import kamon.metric.instrument.{ Counter, Histogram, MinMaxCounter } + +case class ActorMetrics(name: String) extends MetricGroupIdentity { + val category = ActorMetrics +} + +object ActorMetrics extends MetricGroupCategory { + val name = "actor" + + case object ProcessingTime extends MetricIdentity { val name = "processing-time" } + case object MailboxSize extends MetricIdentity { val name = "mailbox-size" } + case object TimeInMailbox extends MetricIdentity { val name = "time-in-mailbox" } + case object Errors extends MetricIdentity { val name = "errors" } + + case class ActorMetricsRecorder(processingTime: Histogram, timeInMailbox: Histogram, mailboxSize: MinMaxCounter, + errors: Counter) extends MetricGroupRecorder { + + def collect(context: CollectionContext): ActorMetricSnapshot = + ActorMetricSnapshot( + 
processingTime.collect(context), + timeInMailbox.collect(context), + mailboxSize.collect(context), + errors.collect(context)) + + def cleanup: Unit = { + processingTime.cleanup + mailboxSize.cleanup + timeInMailbox.cleanup + errors.cleanup + } + } + + case class ActorMetricSnapshot(processingTime: Histogram.Snapshot, timeInMailbox: Histogram.Snapshot, + mailboxSize: Histogram.Snapshot, errors: Counter.Snapshot) extends MetricGroupSnapshot { + + type GroupSnapshotType = ActorMetricSnapshot + + def merge(that: ActorMetricSnapshot, context: CollectionContext): ActorMetricSnapshot = + ActorMetricSnapshot( + processingTime.merge(that.processingTime, context), + timeInMailbox.merge(that.timeInMailbox, context), + mailboxSize.merge(that.mailboxSize, context), + errors.merge(that.errors, context)) + + lazy val metrics: Map[MetricIdentity, MetricSnapshot] = Map( + (ProcessingTime -> processingTime), + (MailboxSize -> mailboxSize), + (TimeInMailbox -> timeInMailbox), + (Errors -> errors)) + } + + val Factory = ActorMetricGroupFactory +} + +case object ActorMetricGroupFactory extends MetricGroupFactory { + import kamon.akka.ActorMetrics._ + + type GroupRecorder = ActorMetricsRecorder + + def create(config: Config, system: ActorSystem): ActorMetricsRecorder = { + val settings = config.getConfig("precision.actor") + + val processingTimeConfig = settings.getConfig("processing-time") + val timeInMailboxConfig = settings.getConfig("time-in-mailbox") + val mailboxSizeConfig = settings.getConfig("mailbox-size") + + new ActorMetricsRecorder( + Histogram.fromConfig(processingTimeConfig), + Histogram.fromConfig(timeInMailboxConfig), + MinMaxCounter.fromConfig(mailboxSizeConfig, system), + Counter()) + } +} diff --git a/kamon-akka/src/main/scala/kamon/akka/AkkaExtension.scala b/kamon-akka/src/main/scala/kamon/akka/AkkaExtension.scala new file mode 100644 index 00000000..bc013b63 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/akka/AkkaExtension.scala @@ -0,0 +1,32 @@ +/* + * 
========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.akka + +import akka.actor +import akka.actor._ +import kamon._ + +class AkkaExtension(system: ExtendedActorSystem) extends Kamon.Extension { + val config = system.settings.config.getConfig("kamon.akka") + val askPatternTimeoutWarning = config.getString("ask-pattern-timeout-warning") + val dispatcher = system.dispatchers.lookup(config.getString("dispatcher")) +} + +object Akka extends ExtensionId[AkkaExtension] with ExtensionIdProvider { + def lookup(): ExtensionId[_ <: actor.Extension] = Akka + def createExtension(system: ExtendedActorSystem): AkkaExtension = new AkkaExtension(system) +} \ No newline at end of file diff --git a/kamon-akka/src/main/scala/kamon/akka/DispatcherMetrics.scala b/kamon-akka/src/main/scala/kamon/akka/DispatcherMetrics.scala new file mode 100644 index 00000000..64e16f96 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/akka/DispatcherMetrics.scala @@ -0,0 +1,94 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.akka + +import akka.actor.ActorSystem +import com.typesafe.config.Config +import kamon.metric._ +import kamon.metric.instrument.Histogram + +case class DispatcherMetrics(name: String) extends MetricGroupIdentity { + val category = DispatcherMetrics +} + +object DispatcherMetrics extends MetricGroupCategory { + val name = "dispatcher" + + case object MaximumPoolSize extends MetricIdentity { val name = "maximum-pool-size" } + case object RunningThreadCount extends MetricIdentity { val name = "running-thread-count" } + case object QueueTaskCount extends MetricIdentity { val name = "queued-task-count" } + case object PoolSize extends MetricIdentity { val name = "pool-size" } + + case class DispatcherMetricRecorder(maximumPoolSize: Histogram, runningThreadCount: Histogram, + queueTaskCount: Histogram, poolSize: Histogram) + extends MetricGroupRecorder { + + def collect(context: CollectionContext): MetricGroupSnapshot = + DispatcherMetricSnapshot( + maximumPoolSize.collect(context), + runningThreadCount.collect(context), + queueTaskCount.collect(context), + poolSize.collect(context)) + + def cleanup: Unit = {} + + } + + case class DispatcherMetricSnapshot(maximumPoolSize: Histogram.Snapshot, runningThreadCount: Histogram.Snapshot, + queueTaskCount: Histogram.Snapshot, poolSize: Histogram.Snapshot) extends MetricGroupSnapshot { + + type GroupSnapshotType = DispatcherMetricSnapshot + + def merge(that: DispatcherMetricSnapshot, context: 
CollectionContext): DispatcherMetricSnapshot = + DispatcherMetricSnapshot( + maximumPoolSize.merge(that.maximumPoolSize, context), + runningThreadCount.merge(that.runningThreadCount, context), + queueTaskCount.merge(that.queueTaskCount, context), + poolSize.merge(that.poolSize, context)) + + lazy val metrics: Map[MetricIdentity, MetricSnapshot] = Map( + (MaximumPoolSize -> maximumPoolSize), + (RunningThreadCount -> runningThreadCount), + (QueueTaskCount -> queueTaskCount), + (PoolSize -> poolSize)) + } + + val Factory = DispatcherMetricGroupFactory +} + +case object DispatcherMetricGroupFactory extends MetricGroupFactory { + + import kamon.akka.DispatcherMetrics._ + + type GroupRecorder = DispatcherMetricRecorder + + def create(config: Config, system: ActorSystem): DispatcherMetricRecorder = { + val settings = config.getConfig("precision.dispatcher") + + val maximumPoolSizeConfig = settings.getConfig("maximum-pool-size") + val runningThreadCountConfig = settings.getConfig("running-thread-count") + val queueTaskCountConfig = settings.getConfig("queued-task-count") + val poolSizeConfig = settings.getConfig("pool-size") + + new DispatcherMetricRecorder( + Histogram.fromConfig(maximumPoolSizeConfig), + Histogram.fromConfig(runningThreadCountConfig), + Histogram.fromConfig(queueTaskCountConfig), + Histogram.fromConfig(poolSizeConfig)) + } + +} diff --git a/kamon-akka/src/main/scala/kamon/akka/RouterMetrics.scala b/kamon-akka/src/main/scala/kamon/akka/RouterMetrics.scala new file mode 100644 index 00000000..2eedf764 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/akka/RouterMetrics.scala @@ -0,0 +1,89 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ +package kamon.akka + +import akka.actor.ActorSystem +import com.typesafe.config.Config +import kamon.metric._ +import kamon.metric.instrument.{ Counter, Histogram } + +case class RouterMetrics(name: String) extends MetricGroupIdentity { + val category = RouterMetrics +} + +object RouterMetrics extends MetricGroupCategory { + val name = "router" + + case object RoutingTime extends MetricIdentity { val name = "routing-time" } + case object ProcessingTime extends MetricIdentity { val name = "processing-time" } + case object TimeInMailbox extends MetricIdentity { val name = "time-in-mailbox" } + case object Errors extends MetricIdentity { val name = "errors" } + + case class RouterMetricsRecorder(routingTime: Histogram, processingTime: Histogram, timeInMailbox: Histogram, errors: Counter) extends MetricGroupRecorder { + + def collect(context: CollectionContext): RouterMetricSnapshot = + RouterMetricSnapshot(routingTime.collect(context), processingTime.collect(context), timeInMailbox.collect(context), errors.collect(context)) + + def cleanup: Unit = { + routingTime.cleanup + processingTime.cleanup + timeInMailbox.cleanup + errors.cleanup + } + } + + case class RouterMetricSnapshot(routingTime: Histogram.Snapshot, processingTime: Histogram.Snapshot, timeInMailbox: Histogram.Snapshot, errors: Counter.Snapshot) extends MetricGroupSnapshot { + + type GroupSnapshotType = RouterMetricSnapshot + + def merge(that: RouterMetricSnapshot, context: CollectionContext): 
RouterMetricSnapshot = + RouterMetricSnapshot( + routingTime.merge(that.routingTime, context), + processingTime.merge(that.processingTime, context), + timeInMailbox.merge(that.timeInMailbox, context), + errors.merge(that.errors, context)) + + lazy val metrics: Map[MetricIdentity, MetricSnapshot] = Map( + RoutingTime -> routingTime, + ProcessingTime -> processingTime, + TimeInMailbox -> timeInMailbox, + Errors -> errors) + } + + val Factory = RouterMetricGroupFactory +} + +case object RouterMetricGroupFactory extends MetricGroupFactory { + + import kamon.akka.RouterMetrics._ + + type GroupRecorder = RouterMetricsRecorder + + def create(config: Config, system: ActorSystem): RouterMetricsRecorder = { + val settings = config.getConfig("precision.router") + + val routingTimeConfig = settings.getConfig("routing-time") + val processingTimeConfig = settings.getConfig("processing-time") + val timeInMailboxConfig = settings.getConfig("time-in-mailbox") + + new RouterMetricsRecorder( + Histogram.fromConfig(routingTimeConfig), + Histogram.fromConfig(processingTimeConfig), + Histogram.fromConfig(timeInMailboxConfig), + Counter()) + } +} + diff --git a/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorCellInstrumentation.scala b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorCellInstrumentation.scala new file mode 100644 index 00000000..78d88583 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorCellInstrumentation.scala @@ -0,0 +1,213 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package akka.kamon.instrumentation + +import akka.actor._ +import akka.dispatch.{ Envelope, MessageDispatcher } +import akka.routing.RoutedActorCell +import kamon.Kamon +import kamon.akka.{ RouterMetrics, ActorMetrics } +import ActorMetrics.ActorMetricsRecorder +import RouterMetrics.RouterMetricsRecorder +import kamon.metric.Metrics +import kamon.trace._ +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation._ + +@Aspect +class ActorCellInstrumentation { + + @Pointcut("execution(akka.actor.ActorCell.new(..)) && this(cell) && args(system, ref, props, dispatcher, parent)") + def actorCellCreation(cell: ActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, parent: ActorRef): Unit = {} + + @After("actorCellCreation(cell, system, ref, props, dispatcher, parent)") + def afterCreation(cell: ActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, parent: ActorRef): Unit = { + val metricsExtension = Kamon(Metrics)(system) + val metricIdentity = ActorMetrics(ref.path.elements.mkString("/")) + val cellMetrics = cell.asInstanceOf[ActorCellMetrics] + + cellMetrics.actorMetricIdentity = metricIdentity + cellMetrics.actorMetricsRecorder = metricsExtension.register(metricIdentity, ActorMetrics.Factory) + } + + @Pointcut("execution(* akka.actor.ActorCell.invoke(*)) && this(cell) && args(envelope)") + def invokingActorBehaviourAtActorCell(cell: ActorCell, envelope: Envelope) 
= {} + + @Around("invokingActorBehaviourAtActorCell(cell, envelope)") + def aroundBehaviourInvoke(pjp: ProceedingJoinPoint, cell: ActorCell, envelope: Envelope): Any = { + val cellMetrics = cell.asInstanceOf[ActorCellMetrics] + val timestampBeforeProcessing = System.nanoTime() + val contextAndTimestamp = envelope.asInstanceOf[TimestampedTraceContextAware] + + try { + TraceRecorder.withInlineTraceContextReplacement(contextAndTimestamp.traceContext) { + pjp.proceed() + } + } finally { + cellMetrics.actorMetricsRecorder.map { am ⇒ + val processingTime = System.nanoTime() - timestampBeforeProcessing + val timeInMailbox = timestampBeforeProcessing - contextAndTimestamp.captureNanoTime + + am.processingTime.record(processingTime) + am.timeInMailbox.record(timeInMailbox) + am.mailboxSize.decrement() + + // In case that this actor is behind a router, record the metrics for the router. + envelope.asInstanceOf[RouterAwareEnvelope].routerMetricsRecorder.map { rm ⇒ + rm.processingTime.record(processingTime) + rm.timeInMailbox.record(timeInMailbox) + } + } + } + } + + @Pointcut("execution(* akka.actor.ActorCell.sendMessage(*)) && this(cell) && args(envelope)") + def sendMessageInActorCell(cell: ActorCell, envelope: Envelope): Unit = {} + + @After("sendMessageInActorCell(cell, envelope)") + def afterSendMessageInActorCell(cell: ActorCell, envelope: Envelope): Unit = { + val cellMetrics = cell.asInstanceOf[ActorCellMetrics] + cellMetrics.actorMetricsRecorder.map(_.mailboxSize.increment()) + } + + @Pointcut("execution(* akka.actor.ActorCell.stop()) && this(cell)") + def actorStop(cell: ActorCell): Unit = {} + + @After("actorStop(cell)") + def afterStop(cell: ActorCell): Unit = { + val cellMetrics = cell.asInstanceOf[ActorCellMetrics] + cellMetrics.actorMetricsRecorder.map { _ ⇒ + Kamon(Metrics)(cell.system).unregister(cellMetrics.actorMetricIdentity) + } + + // The Stop can't be captured from the RoutedActorCell so we need to put this piece of cleanup here. 
+ if (cell.isInstanceOf[RoutedActorCell]) { + val routedCellMetrics = cell.asInstanceOf[RoutedActorCellMetrics] + routedCellMetrics.routerMetricsRecorder.map { _ ⇒ + Kamon(Metrics)(cell.system).unregister(routedCellMetrics.routerMetricIdentity) + } + } + } + + @Pointcut("execution(* akka.actor.ActorCell.handleInvokeFailure(..)) && this(cell)") + def actorInvokeFailure(cell: ActorCell): Unit = {} + + @Before("actorInvokeFailure(cell)") + def beforeInvokeFailure(cell: ActorCell): Unit = { + val cellWithMetrics = cell.asInstanceOf[ActorCellMetrics] + cellWithMetrics.actorMetricsRecorder.map(_.errors.increment()) + + // In case that this actor is behind a router, count the errors for the router as well. + val envelope = cell.currentMessage.asInstanceOf[RouterAwareEnvelope] + envelope.routerMetricsRecorder.map(_.errors.increment()) + } +} + +@Aspect +class RoutedActorCellInstrumentation { + + @Pointcut("execution(akka.routing.RoutedActorCell.new(..)) && this(cell) && args(system, ref, props, dispatcher, routeeProps, supervisor)") + def routedActorCellCreation(cell: RoutedActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, routeeProps: Props, supervisor: ActorRef): Unit = {} + + @After("routedActorCellCreation(cell, system, ref, props, dispatcher, routeeProps, supervisor)") + def afterRoutedActorCellCreation(cell: RoutedActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, routeeProps: Props, supervisor: ActorRef): Unit = { + val metricsExtension = Kamon(Metrics)(system) + val metricIdentity = RouterMetrics(ref.path.elements.mkString("/")) + val cellMetrics = cell.asInstanceOf[RoutedActorCellMetrics] + + cellMetrics.routerMetricIdentity = metricIdentity + cellMetrics.routerMetricsRecorder = metricsExtension.register(metricIdentity, RouterMetrics.Factory) + } + + @Pointcut("execution(* akka.routing.RoutedActorCell.sendMessage(*)) && this(cell) && args(envelope)") + def 
sendMessageInRouterActorCell(cell: RoutedActorCell, envelope: Envelope) = {} + + @Around("sendMessageInRouterActorCell(cell, envelope)") + def aroundSendMessageInRouterActorCell(pjp: ProceedingJoinPoint, cell: RoutedActorCell, envelope: Envelope): Any = { + val cellMetrics = cell.asInstanceOf[RoutedActorCellMetrics] + val timestampBeforeProcessing = System.nanoTime() + val contextAndTimestamp = envelope.asInstanceOf[TimestampedTraceContextAware] + + try { + TraceRecorder.withInlineTraceContextReplacement(contextAndTimestamp.traceContext) { + + // The router metrics recorder will only be picked up if the message is sent from a tracked router. + RouterAwareEnvelope.dynamicRouterMetricsRecorder.withValue(cellMetrics.routerMetricsRecorder) { + pjp.proceed() + } + } + } finally { + cellMetrics.routerMetricsRecorder map { routerRecorder ⇒ + routerRecorder.routingTime.record(System.nanoTime() - timestampBeforeProcessing) + } + } + } +} + +trait ActorCellMetrics { + var actorMetricIdentity: ActorMetrics = _ + var actorMetricsRecorder: Option[ActorMetricsRecorder] = _ +} + +trait RoutedActorCellMetrics { + var routerMetricIdentity: RouterMetrics = _ + var routerMetricsRecorder: Option[RouterMetricsRecorder] = _ +} + +trait RouterAwareEnvelope { + def routerMetricsRecorder: Option[RouterMetricsRecorder] +} + +object RouterAwareEnvelope { + import scala.util.DynamicVariable + private[kamon] val dynamicRouterMetricsRecorder = new DynamicVariable[Option[RouterMetricsRecorder]](None) + + def default: RouterAwareEnvelope = new RouterAwareEnvelope { + val routerMetricsRecorder: Option[RouterMetricsRecorder] = dynamicRouterMetricsRecorder.value + } +} + +@Aspect +class MetricsIntoActorCellsMixin { + + @DeclareMixin("akka.actor.ActorCell") + def mixinActorCellMetricsToActorCell: ActorCellMetrics = new ActorCellMetrics {} + + @DeclareMixin("akka.routing.RoutedActorCell") + def mixinActorCellMetricsToRoutedActorCell: RoutedActorCellMetrics = new RoutedActorCellMetrics {} + +} + 
+@Aspect +class TraceContextIntoEnvelopeMixin { + + @DeclareMixin("akka.dispatch.Envelope") + def mixinTraceContextAwareToEnvelope: TimestampedTraceContextAware = TimestampedTraceContextAware.default + + @DeclareMixin("akka.dispatch.Envelope") + def mixinRouterAwareToEnvelope: RouterAwareEnvelope = RouterAwareEnvelope.default + + @Pointcut("execution(akka.dispatch.Envelope.new(..)) && this(ctx)") + def envelopeCreation(ctx: TimestampedTraceContextAware): Unit = {} + + @After("envelopeCreation(ctx)") + def afterEnvelopeCreation(ctx: TimestampedTraceContextAware with RouterAwareEnvelope): Unit = { + // Necessary to force the initialization of ContextAware at the moment of creation. + ctx.traceContext + ctx.routerMetricsRecorder + } +} \ No newline at end of file diff --git a/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorLoggingInstrumentation.scala b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorLoggingInstrumentation.scala new file mode 100644 index 00000000..e0e5d316 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorLoggingInstrumentation.scala @@ -0,0 +1,50 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package akka.kamon.instrumentation + +import kamon.trace.logging.MdcKeysSupport +import kamon.trace.{ TraceContextAware, TraceRecorder } +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation._ + +@Aspect +class ActorLoggingInstrumentation extends MdcKeysSupport { + + @DeclareMixin("akka.event.Logging.LogEvent+") + def mixinTraceContextAwareToLogEvent: TraceContextAware = TraceContextAware.default + + @Pointcut("execution(akka.event.Logging.LogEvent+.new(..)) && this(event)") + def logEventCreation(event: TraceContextAware): Unit = {} + + @After("logEventCreation(event)") + def captureTraceContext(event: TraceContextAware): Unit = { + // Force initialization of TraceContextAware + event.traceContext + } + + @Pointcut("execution(* akka.event.slf4j.Slf4jLogger.withMdc(..)) && args(logSource, logEvent, logStatement)") + def withMdcInvocation(logSource: String, logEvent: TraceContextAware, logStatement: () ⇒ _): Unit = {} + + @Around("withMdcInvocation(logSource, logEvent, logStatement)") + def aroundWithMdcInvocation(pjp: ProceedingJoinPoint, logSource: String, logEvent: TraceContextAware, logStatement: () ⇒ _): Unit = { + TraceRecorder.withInlineTraceContextReplacement(logEvent.traceContext) { + withMdc { + pjp.proceed() + } + } + } +} diff --git a/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentation.scala b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentation.scala new file mode 100644 index 00000000..48016876 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentation.scala @@ -0,0 +1,80 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file 
+ * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package akka.kamon.instrumentation + +import akka.dispatch.sysmsg.EarliestFirstSystemMessageList +import kamon.trace.{ TraceContextAware, TraceRecorder } +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation._ + +@Aspect +class ActorSystemMessageInstrumentation { + + @Pointcut("execution(* akka.actor.ActorCell.invokeAll$1(..)) && args(messages, *)") + def systemMessageProcessing(messages: EarliestFirstSystemMessageList): Unit = {} + + @Around("systemMessageProcessing(messages)") + def aroundSystemMessageInvoke(pjp: ProceedingJoinPoint, messages: EarliestFirstSystemMessageList): Any = { + if (messages.nonEmpty) { + val ctx = messages.head.asInstanceOf[TraceContextAware].traceContext + TraceRecorder.withInlineTraceContextReplacement(ctx)(pjp.proceed()) + + } else pjp.proceed() + } +} + +@Aspect +class TraceContextIntoSystemMessageMixin { + + @DeclareMixin("akka.dispatch.sysmsg.SystemMessage+") + def mixinTraceContextAwareToSystemMessage: TraceContextAware = TraceContextAware.default + + @Pointcut("execution(akka.dispatch.sysmsg.SystemMessage+.new(..)) && this(ctx)") + def envelopeCreation(ctx: TraceContextAware): Unit = {} + + @After("envelopeCreation(ctx)") + def afterEnvelopeCreation(ctx: TraceContextAware): Unit = { + // Necessary to force the initialization of ContextAware at the moment of creation. 
+ ctx.traceContext + } +} + +@Aspect +class TraceContextIntoRepointableActorRefMixin { + + @DeclareMixin("akka.actor.RepointableActorRef") + def mixinTraceContextAwareToRepointableActorRef: TraceContextAware = TraceContextAware.default + + @Pointcut("execution(akka.actor.RepointableActorRef.new(..)) && this(ctx)") + def envelopeCreation(ctx: TraceContextAware): Unit = {} + + @After("envelopeCreation(ctx)") + def afterEnvelopeCreation(ctx: TraceContextAware): Unit = { + // Necessary to force the initialization of ContextAware at the moment of creation. + ctx.traceContext + } + + @Pointcut("execution(* akka.actor.RepointableActorRef.point(..)) && this(repointableActorRef)") + def repointableActorRefCreation(repointableActorRef: TraceContextAware): Unit = {} + + @Around("repointableActorRefCreation(repointableActorRef)") + def afterRepointableActorRefCreation(pjp: ProceedingJoinPoint, repointableActorRef: TraceContextAware): Any = { + TraceRecorder.withInlineTraceContextReplacement(repointableActorRef.traceContext) { + pjp.proceed() + } + } +} \ No newline at end of file diff --git a/kamon-akka/src/main/scala/kamon/instrumentation/akka/AskPatternInstrumentation.scala b/kamon-akka/src/main/scala/kamon/instrumentation/akka/AskPatternInstrumentation.scala new file mode 100644 index 00000000..ebddbfc8 --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/instrumentation/akka/AskPatternInstrumentation.scala @@ -0,0 +1,83 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package akka.kamon.instrumentation + +import kamon.Kamon +import kamon.akka.Akka +import kamon.trace.{ TraceRecorder, TraceContext, EmptyTraceContext, TraceContextAware } +import akka.actor.{ ActorSystem, ActorRef } +import akka.event.Logging.Warning +import akka.pattern.AskTimeoutException +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation._ +import org.aspectj.lang.reflect.SourceLocation +import scala.concurrent.Future +import scala.compat.Platform.EOL + +@Aspect +class AskPatternInstrumentation { + + import AskPatternInstrumentation._ + + @Pointcut("call(* akka.pattern.AskableActorRef$.$qmark$extension(..)) && args(actor, *, *)") + def askableActorRefAsk(actor: ActorRef): Unit = {} + + @Around("askableActorRefAsk(actor)") + def hookAskTimeoutWarning(pjp: ProceedingJoinPoint, actor: ActorRef): AnyRef = + TraceRecorder.withTraceContextAndSystem { (ctx, system) ⇒ + val akkaExtension = Kamon(Akka)(system) + val future = pjp.proceed().asInstanceOf[Future[AnyRef]] + + val handler = akkaExtension.askPatternTimeoutWarning match { + case "off" ⇒ None + case "lightweight" ⇒ Some(errorHandler(callInfo = Some(CallInfo(s"${actor.path.name} ?", pjp.getSourceLocation)))(system)) + case "heavyweight" ⇒ Some(errorHandler(stack = Some(new StackTraceCaptureException))(system)) + } + + handler.map(future.onFailure(_)(akkaExtension.dispatcher)) + future + + } getOrElse (pjp.proceed()) + + def errorHandler(callInfo: Option[CallInfo] = None, 
stack: Option[StackTraceCaptureException] = None)(implicit system: ActorSystem): ErrorHandler = { + case e: AskTimeoutException ⇒ + val message = { + if (stack.isDefined) stack.map(s ⇒ s.getStackTrace.drop(3).mkString("", EOL, EOL)) + else callInfo.map(_.message) + } + publish(message) + } + + def publish(message: Option[String])(implicit system: ActorSystem) = message map { msg ⇒ + system.eventStream.publish(Warning("AskPatternTracing", classOf[AskPatternInstrumentation], + s"Timeout triggered for ask pattern registered at: $msg")) + } +} + +object AskPatternInstrumentation { + type ErrorHandler = PartialFunction[Throwable, Unit] + + class StackTraceCaptureException extends Throwable + + case class CallInfo(name: String, sourceLocation: SourceLocation) { + def message: String = { + def locationInfo: String = Option(sourceLocation).map(location ⇒ s"${location.getFileName}:${location.getLine}").getOrElse("") + def line: String = s"$name @ $locationInfo" + s"$line" + } + } +} \ No newline at end of file diff --git a/kamon-akka/src/main/scala/kamon/instrumentation/akka/DispatcherInstrumentation.scala b/kamon-akka/src/main/scala/kamon/instrumentation/akka/DispatcherInstrumentation.scala new file mode 100644 index 00000000..8280edca --- /dev/null +++ b/kamon-akka/src/main/scala/kamon/instrumentation/akka/DispatcherInstrumentation.scala @@ -0,0 +1,164 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package akka.kamon.instrumentation + +import java.lang.reflect.Method +import java.util.concurrent.ThreadPoolExecutor + +import akka.actor.{ ActorSystemImpl, Cancellable } +import akka.dispatch.{ Dispatcher, Dispatchers, ExecutorServiceDelegate, MessageDispatcher } +import akka.kamon.instrumentation.DispatcherMetricsCollector.DispatcherMetricsMeasurement +import kamon.Kamon +import kamon.akka.DispatcherMetrics +import DispatcherMetrics.DispatcherMetricRecorder +import kamon.metric.Metrics +import org.aspectj.lang.annotation._ + +import scala.concurrent.forkjoin.ForkJoinPool + +@Aspect +class DispatcherInstrumentation { + + @Pointcut("execution(akka.dispatch.Dispatchers.new(..)) && this(dispatchers) && cflow(execution(akka.actor.ActorSystemImpl.new(..)) && this(system))") + def onActorSystemStartup(dispatchers: Dispatchers, system: ActorSystemImpl) = {} + + @Before("onActorSystemStartup(dispatchers, system)") + def beforeActorSystemStartup(dispatchers: Dispatchers, system: ActorSystemImpl): Unit = { + val currentDispatchers = dispatchers.asInstanceOf[DispatchersWithActorSystem] + currentDispatchers.actorSystem = system + } + + @Pointcut("execution(* akka.dispatch.Dispatchers.lookup(..)) && this(dispatchers)") + def onDispatchersLookup(dispatchers: Dispatchers) = {} + + @AfterReturning(pointcut = "onDispatchersLookup(dispatchers)", returning = "dispatcher") + def afterReturningLookup(dispatchers: Dispatchers, dispatcher: Dispatcher): Unit = { + val dispatchersWithActorSystem = dispatchers.asInstanceOf[DispatchersWithActorSystem] + val dispatcherWithMetrics = dispatcher.asInstanceOf[DispatcherMetricCollectionInfo] + + dispatcherWithMetrics.actorSystem = dispatchersWithActorSystem.actorSystem + } + + @Pointcut("call(* 
akka.dispatch.ExecutorServiceFactory.createExecutorService(..))") + def onCreateExecutorService(): Unit = {} + + @Pointcut("cflow((execution(* akka.dispatch.MessageDispatcher.registerForExecution(..)) || execution(* akka.dispatch.MessageDispatcher.executeTask(..))) && this(dispatcher))") + def onCflowMessageDispatcher(dispatcher: Dispatcher): Unit = {} + + @Pointcut("onCreateExecutorService() && onCflowMessageDispatcher(dispatcher)") + def onDispatcherStartup(dispatcher: Dispatcher): Unit = {} + + @After("onDispatcherStartup(dispatcher)") + def afterDispatcherStartup(dispatcher: MessageDispatcher): Unit = { + + val dispatcherWithMetrics = dispatcher.asInstanceOf[DispatcherMetricCollectionInfo] + val metricsExtension = Kamon(Metrics)(dispatcherWithMetrics.actorSystem) + val metricIdentity = DispatcherMetrics(dispatcher.id) + + dispatcherWithMetrics.metricIdentity = metricIdentity + dispatcherWithMetrics.dispatcherMetricsRecorder = metricsExtension.register(metricIdentity, DispatcherMetrics.Factory) + + if (dispatcherWithMetrics.dispatcherMetricsRecorder.isDefined) { + dispatcherWithMetrics.dispatcherCollectorCancellable = metricsExtension.scheduleGaugeRecorder { + dispatcherWithMetrics.dispatcherMetricsRecorder.map { + dm ⇒ + val DispatcherMetricsMeasurement(maximumPoolSize, runningThreadCount, queueTaskCount, poolSize) = + DispatcherMetricsCollector.collect(dispatcher) + + dm.maximumPoolSize.record(maximumPoolSize) + dm.runningThreadCount.record(runningThreadCount) + dm.queueTaskCount.record(queueTaskCount) + dm.poolSize.record(poolSize) + } + } + } + } + + @Pointcut("execution(* akka.dispatch.MessageDispatcher.shutdown(..)) && this(dispatcher)") + def onDispatcherShutdown(dispatcher: MessageDispatcher): Unit = {} + + @After("onDispatcherShutdown(dispatcher)") + def afterDispatcherShutdown(dispatcher: MessageDispatcher): Unit = { + val dispatcherWithMetrics = dispatcher.asInstanceOf[DispatcherMetricCollectionInfo] + + 
dispatcherWithMetrics.dispatcherMetricsRecorder.map { + dispatcher ⇒ + dispatcherWithMetrics.dispatcherCollectorCancellable.cancel() + Kamon(Metrics)(dispatcherWithMetrics.actorSystem).unregister(dispatcherWithMetrics.metricIdentity) + } + } +} + +@Aspect +class DispatcherMetricCollectionInfoIntoDispatcherMixin { + + @DeclareMixin("akka.dispatch.MessageDispatcher") + def mixinDispatcherMetricsToMessageDispatcher: DispatcherMetricCollectionInfo = new DispatcherMetricCollectionInfo {} + + @DeclareMixin("akka.dispatch.Dispatchers") + def mixinDispatchersToDispatchersWithActorSystem: DispatchersWithActorSystem = new DispatchersWithActorSystem {} +} + +trait DispatcherMetricCollectionInfo { + var metricIdentity: DispatcherMetrics = _ + var dispatcherMetricsRecorder: Option[DispatcherMetricRecorder] = _ + var dispatcherCollectorCancellable: Cancellable = _ + var actorSystem: ActorSystemImpl = _ +} + +trait DispatchersWithActorSystem { + var actorSystem: ActorSystemImpl = _ +} + +object DispatcherMetricsCollector { + + case class DispatcherMetricsMeasurement(maximumPoolSize: Long, runningThreadCount: Long, queueTaskCount: Long, poolSize: Long) + + private def collectForkJoinMetrics(pool: ForkJoinPool): DispatcherMetricsMeasurement = { + DispatcherMetricsMeasurement(pool.getParallelism, pool.getActiveThreadCount, + (pool.getQueuedTaskCount + pool.getQueuedSubmissionCount), pool.getPoolSize) + } + + private def collectExecutorMetrics(pool: ThreadPoolExecutor): DispatcherMetricsMeasurement = { + DispatcherMetricsMeasurement(pool.getMaximumPoolSize, pool.getActiveCount, pool.getQueue.size(), pool.getPoolSize) + } + + private val executorServiceMethod: Method = { + // executorService is protected + val method = classOf[Dispatcher].getDeclaredMethod("executorService") + method.setAccessible(true) + method + } + + def collect(dispatcher: MessageDispatcher): DispatcherMetricsMeasurement = { + dispatcher match { + case x: Dispatcher ⇒ { + val executor = 
executorServiceMethod.invoke(x) match { + case delegate: ExecutorServiceDelegate ⇒ delegate.executor + case other ⇒ other + } + + executor match { + case fjp: ForkJoinPool ⇒ collectForkJoinMetrics(fjp) + case tpe: ThreadPoolExecutor ⇒ collectExecutorMetrics(tpe) + case anything ⇒ DispatcherMetricsMeasurement(0L, 0L, 0L, 0L) + } + } + case _ ⇒ new DispatcherMetricsMeasurement(0L, 0L, 0L, 0L) + } + } +} diff --git a/kamon-akka/src/test/resources/logback.xml b/kamon-akka/src/test/resources/logback.xml new file mode 100644 index 00000000..10c9aa35 --- /dev/null +++ b/kamon-akka/src/test/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + + %date{HH:mm:ss.SSS} %-5level [%traceToken][%thread] %logger{55} - %msg%n + + + + + + + + diff --git a/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorCellInstrumentationSpec.scala b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorCellInstrumentationSpec.scala new file mode 100644 index 00000000..06a232bd --- /dev/null +++ b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorCellInstrumentationSpec.scala @@ -0,0 +1,125 @@ +/* =================================================== + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ========================================================== */ +package kamon.instrumentation.akka + +import akka.actor.{ Actor, ActorSystem, Props } +import akka.pattern.{ ask, pipe } +import akka.routing._ +import akka.testkit.{ TestKitBase, ImplicitSender, TestKit } +import akka.util.Timeout +import com.typesafe.config.ConfigFactory +import kamon.trace.TraceRecorder +import org.scalatest.{ BeforeAndAfterAll, WordSpecLike } + +import scala.concurrent.duration._ + +class ActorCellInstrumentationSpec extends TestKitBase with WordSpecLike with ImplicitSender with BeforeAndAfterAll { + implicit lazy val system: ActorSystem = ActorSystem("actor-cell-instrumentation-spec") + implicit val executionContext = system.dispatcher + + "the message passing instrumentation" should { + "propagate the TraceContext using bang" in new EchoActorFixture { + val testTraceContext = TraceRecorder.withNewTraceContext("bang-reply") { + ctxEchoActor ! "test" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "propagate the TraceContext using tell" in new EchoActorFixture { + val testTraceContext = TraceRecorder.withNewTraceContext("tell-reply") { + ctxEchoActor.tell("test", testActor) + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "propagate the TraceContext using ask" in new EchoActorFixture { + implicit val timeout = Timeout(1 seconds) + val testTraceContext = TraceRecorder.withNewTraceContext("ask-reply") { + // The pipe pattern use Futures internally, so FutureTracing test should cover the underpinnings of it. + (ctxEchoActor ? 
"test") pipeTo (testActor) + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "propagate the TraceContext to actors behind a simple router" in new EchoSimpleRouterFixture { + val testTraceContext = TraceRecorder.withNewTraceContext("router-reply") { + router.route("test", testActor) + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "propagate the TraceContext to actors behind a pool router" in new EchoPoolRouterFixture { + val testTraceContext = TraceRecorder.withNewTraceContext("router-reply") { + pool ! "test" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "propagate the TraceContext to actors behind a group router" in new EchoGroupRouterFixture { + val testTraceContext = TraceRecorder.withNewTraceContext("router-reply") { + group ! "test" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + } + + override protected def afterAll(): Unit = shutdown() + + trait EchoActorFixture { + val ctxEchoActor = system.actorOf(Props[TraceContextEcho]) + } + + trait EchoSimpleRouterFixture { + val router = { + val routees = Vector.fill(5) { + val r = system.actorOf(Props[TraceContextEcho]) + ActorRefRoutee(r) + } + Router(RoundRobinRoutingLogic(), routees) + } + } + + trait EchoPoolRouterFixture { + val pool = system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[TraceContextEcho]), "pool-router") + } + + trait EchoGroupRouterFixture { + val routees = Vector.fill(5) { + system.actorOf(Props[TraceContextEcho]) + } + + val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), "group-router") + } +} + +class TraceContextEcho extends Actor { + def receive = { + case msg: String ⇒ sender ! 
TraceRecorder.currentContext + } +} + diff --git a/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorLoggingInstrumentationSpec.scala b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorLoggingInstrumentationSpec.scala new file mode 100644 index 00000000..598e9327 --- /dev/null +++ b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorLoggingInstrumentationSpec.scala @@ -0,0 +1,71 @@ +/* =================================================== + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ========================================================== */ +package kamon.instrumentation.akka + +import akka.actor.{ Actor, ActorLogging, ActorSystem, Props } +import akka.event.Logging.LogEvent +import akka.testkit.TestKitBase +import com.typesafe.config.ConfigFactory +import kamon.trace.TraceLocal.AvailableToMdc +import kamon.trace.logging.MdcKeysSupport +import kamon.trace.{ TraceContextAware, TraceLocal, TraceRecorder } +import org.scalatest.{ BeforeAndAfterAll, Inspectors, Matchers, WordSpecLike } +import org.slf4j.MDC + +class ActorLoggingInstrumentationSpec extends TestKitBase with WordSpecLike with Matchers with Inspectors with MdcKeysSupport with BeforeAndAfterAll { + implicit lazy val system: ActorSystem = ActorSystem("actor-logging-instrumentation-spec", + ConfigFactory.parseString("""akka.loggers = ["akka.event.slf4j.Slf4jLogger"]""")) + + "the ActorLogging instrumentation" should { + "attach the TraceContext (if available) to log events" in { + val loggerActor = system.actorOf(Props[LoggerActor]) + system.eventStream.subscribe(testActor, classOf[LogEvent]) + + val testTraceContext = TraceRecorder.withNewTraceContext("logging") { + loggerActor ! 
"info" + TraceRecorder.currentContext + } + + fishForMessage() { + case event: LogEvent if event.message.toString startsWith "TraceContext" ⇒ + val ctxInEvent = event.asInstanceOf[TraceContextAware].traceContext + ctxInEvent === testTraceContext + + case event: LogEvent ⇒ false + } + } + + "allow retrieve a value from the MDC when was created a key of type AvailableToMdc" in { + val testString = "Hello World" + TraceRecorder.withNewTraceContext("logging-with-mdc") { + TraceLocal.store(AvailableToMdc("some-cool-key"))(testString) + + withMdc { + MDC.get("other-key") shouldBe (null) + MDC.get("some-cool-key") should equal(testString) + } + } + } + } + + override protected def afterAll(): Unit = shutdown() +} + +class LoggerActor extends Actor with ActorLogging { + def receive = { + case "info" ⇒ log.info("TraceContext(name = {}, token = {})", TraceRecorder.currentContext.name, TraceRecorder.currentContext.token) + } +} diff --git a/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentationSpec.scala b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentationSpec.scala new file mode 100644 index 00000000..0e9025af --- /dev/null +++ b/kamon-akka/src/test/scala/kamon/instrumentation/akka/ActorSystemMessageInstrumentationSpec.scala @@ -0,0 +1,176 @@ +package kamon.instrumentation.akka + +import akka.actor.SupervisorStrategy.{ Escalate, Restart, Resume, Stop } +import akka.actor._ +import akka.testkit.{ TestKitBase, ImplicitSender } +import com.typesafe.config.ConfigFactory +import kamon.trace.{ EmptyTraceContext, TraceRecorder } +import org.scalatest.WordSpecLike + +import scala.concurrent.duration._ +import scala.util.control.NonFatal + +class ActorSystemMessageInstrumentationSpec extends TestKitBase with WordSpecLike with ImplicitSender { + implicit lazy val system: ActorSystem = ActorSystem("actor-system-message-instrumentation-spec", ConfigFactory.parseString( + """ + |akka.loglevel = OFF + """.stripMargin)) + 
+ implicit val executionContext = system.dispatcher + + "the system message passing instrumentation" should { + "keep the TraceContext while processing the Create message in top level actors" in { + val testTraceContext = TraceRecorder.withNewTraceContext("creating-top-level-actor") { + system.actorOf(Props(new Actor { + testActor ! TraceRecorder.currentContext + def receive: Actor.Receive = { case any ⇒ } + })) + + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "keep the TraceContext while processing the Create message in non top level actors" in { + val testTraceContext = TraceRecorder.withNewTraceContext("creating-non-top-level-actor") { + system.actorOf(Props(new Actor { + def receive: Actor.Receive = { + case any ⇒ + context.actorOf(Props(new Actor { + testActor ! TraceRecorder.currentContext + def receive: Actor.Receive = { case any ⇒ } + })) + } + })) ! "any" + + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) + } + + "keep the TraceContext in the supervision cycle" when { + "the actor is resumed" in { + val supervisor = supervisorWithDirective(Resume) + + val testTraceContext = TraceRecorder.withNewTraceContext("fail-and-resume") { + supervisor ! "fail" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) // From the parent executing the supervision strategy + + // Ensure we didn't tie the actor with the context + supervisor ! "context" + expectMsg(EmptyTraceContext) + } + + "the actor is restarted" in { + val supervisor = supervisorWithDirective(Restart, sendPreRestart = true, sendPostRestart = true) + + val testTraceContext = TraceRecorder.withNewTraceContext("fail-and-restart") { + supervisor ! 
"fail" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) // From the parent executing the supervision strategy + expectMsg(testTraceContext) // From the preRestart hook + expectMsg(testTraceContext) // From the postRestart hook + + // Ensure we didn't tie the actor with the context + supervisor ! "context" + expectMsg(EmptyTraceContext) + } + + "the actor is stopped" in { + val supervisor = supervisorWithDirective(Stop, sendPostStop = true) + + val testTraceContext = TraceRecorder.withNewTraceContext("fail-and-stop") { + supervisor ! "fail" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) // From the parent executing the supervision strategy + expectMsg(testTraceContext) // From the postStop hook + expectNoMsg(1 second) + } + + "the failure is escalated" in { + val supervisor = supervisorWithDirective(Escalate, sendPostStop = true) + + val testTraceContext = TraceRecorder.withNewTraceContext("fail-and-escalate") { + supervisor ! "fail" + TraceRecorder.currentContext + } + + expectMsg(testTraceContext) // From the parent executing the supervision strategy + expectMsg(testTraceContext) // From the grandparent executing the supervision strategy + expectMsg(testTraceContext) // From the postStop hook in the child + expectMsg(testTraceContext) // From the postStop hook in the parent + expectNoMsg(1 second) + } + } + } + + def supervisorWithDirective(directive: SupervisorStrategy.Directive, sendPreRestart: Boolean = false, sendPostRestart: Boolean = false, + sendPostStop: Boolean = false, sendPreStart: Boolean = false): ActorRef = { + class GrandParent extends Actor { + val child = context.actorOf(Props(new Parent)) + + override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() { + case NonFatal(throwable) ⇒ testActor ! 
TraceRecorder.currentContext; Stop + } + + def receive = { + case any ⇒ child forward any + } + } + + class Parent extends Actor { + val child = context.actorOf(Props(new Child)) + + override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() { + case NonFatal(throwable) ⇒ testActor ! TraceRecorder.currentContext; directive + } + + def receive: Actor.Receive = { + case any ⇒ child forward any + } + + override def postStop(): Unit = { + if (sendPostStop) testActor ! TraceRecorder.currentContext + super.postStop() + } + } + + class Child extends Actor { + def receive = { + case "fail" ⇒ throw new ArithmeticException("Division by zero.") + case "context" ⇒ sender ! TraceRecorder.currentContext + } + + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + if (sendPreRestart) testActor ! TraceRecorder.currentContext + super.preRestart(reason, message) + } + + override def postRestart(reason: Throwable): Unit = { + if (sendPostRestart) testActor ! TraceRecorder.currentContext + super.postRestart(reason) + } + + override def postStop(): Unit = { + if (sendPostStop) testActor ! TraceRecorder.currentContext + super.postStop() + } + + override def preStart(): Unit = { + if (sendPreStart) testActor ! TraceRecorder.currentContext + super.preStart() + } + } + + system.actorOf(Props(new GrandParent)) + } +} diff --git a/kamon-akka/src/test/scala/kamon/instrumentation/akka/AskPatternInstrumentationSpec.scala b/kamon-akka/src/test/scala/kamon/instrumentation/akka/AskPatternInstrumentationSpec.scala new file mode 100644 index 00000000..5c9905ba --- /dev/null +++ b/kamon-akka/src/test/scala/kamon/instrumentation/akka/AskPatternInstrumentationSpec.scala @@ -0,0 +1,134 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.instrumentation.akka + +import java.util.concurrent.atomic.AtomicInteger + +import akka.actor._ +import akka.event.Logging.Warning +import akka.pattern.ask +import akka.testkit.{ TestProbe, TestKitBase } +import akka.util.Timeout +import com.typesafe.config.ConfigFactory +import kamon.Kamon +import kamon.akka.Akka +import kamon.trace.{ TraceContext, TraceContextAware, TraceRecorder } +import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } + +import scala.concurrent.duration._ + +class AskPatternInstrumentationSpec extends TestKitBase with WordSpecLike with Matchers with BeforeAndAfterAll { + implicit lazy val system: ActorSystem = ActorSystem("ask-pattern-tracing-spec", + ConfigFactory.parseString("""akka.loggers = ["akka.event.slf4j.Slf4jLogger"]""")) + + implicit val ec = system.dispatcher + implicit val askTimeout = Timeout(10 millis) + + // TODO: Make this work with ActorSelections + + "the AskPatternInstrumentation" when { + "configured in heavyweight mode" should { + "log a warning with a full stack trace and the TraceContext taken from the moment the ask was triggered for a actor" in new NoReplyFixture { + setAskPatternTimeoutWarningMode("heavyweight") + + expectTimeoutWarning() { + TraceRecorder.withNewTraceContext("ask-timeout-warning") { + noReplyActorRef ? 
"hello" + TraceRecorder.currentContext + } + } + } + } + + "configured in lightweight mode" should { + "log a warning with a short source location description and the TraceContext taken from the moment the ask was triggered for a actor" in new NoReplyFixture { + setAskPatternTimeoutWarningMode("lightweight") + + expectTimeoutWarning(messageSizeLimit = Some(1)) { + TraceRecorder.withNewTraceContext("ask-timeout-warning") { + noReplyActorRef ? "hello" + TraceRecorder.currentContext + } + } + } + } + + "configured in off mode" should { + "should not log any warning messages" in new NoReplyFixture { + setAskPatternTimeoutWarningMode("off") + + expectTimeoutWarning(expectWarning = false) { + TraceRecorder.withNewTraceContext("ask-timeout-warning") { + noReplyActorRef ? "hello" + TraceRecorder.currentContext + } + } + } + } + } + + override protected def afterAll(): Unit = shutdown() + + def expectTimeoutWarning(messageSizeLimit: Option[Int] = None, expectWarning: Boolean = true)(thunk: ⇒ TraceContext): Unit = { + val listener = warningListener() + val testTraceContext = thunk + + if (expectWarning) { + val warning = listener.fishForMessage() { + case Warning(_, _, msg) if msg.toString.startsWith("Timeout triggered for ask pattern registered at") ⇒ true + case others ⇒ false + }.asInstanceOf[Warning] + + warning.asInstanceOf[TraceContextAware].traceContext should equal(testTraceContext) + messageSizeLimit.map { messageLimit ⇒ + warning.message.toString.lines.size should be(messageLimit) + } + } else { + listener.expectNoMsg() + } + } + + def warningListener(): TestProbe = { + val listener = TestProbe() + system.eventStream.subscribe(listener.ref, classOf[Warning]) + listener + } + + def setAskPatternTimeoutWarningMode(mode: String): Unit = { + val target = Kamon(Akka)(system) + val field = target.getClass.getDeclaredField("askPatternTimeoutWarning") + field.setAccessible(true) + field.set(target, mode) + } + + val fixtureCounter = new AtomicInteger(0) + + trait 
NoReplyFixture { + def noReplyActorRef: ActorRef = system.actorOf(Props[NoReply], "no-reply-" + fixtureCounter.incrementAndGet()) + + def noReplyActorSelection: ActorSelection = { + val target = noReplyActorRef + system.actorSelection(target.path) + } + } +} + +class NoReply extends Actor { + def receive = { + case any ⇒ + } +} diff --git a/kamon-akka/src/test/scala/kamon/metric/ActorMetricsSpec.scala b/kamon-akka/src/test/scala/kamon/metric/ActorMetricsSpec.scala new file mode 100644 index 00000000..6d16386b --- /dev/null +++ b/kamon-akka/src/test/scala/kamon/metric/ActorMetricsSpec.scala @@ -0,0 +1,228 @@ +/* ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
package kamon.metric

import java.nio.LongBuffer

import kamon.Kamon
import kamon.akka.ActorMetrics
import kamon.metric.ActorMetricsTestActor._
import org.scalatest.{ BeforeAndAfterAll, WordSpecLike, Matchers }
import akka.testkit.{ ImplicitSender, TestProbe, TestKitBase }
import akka.actor._
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import ActorMetrics.{ ActorMetricsRecorder, ActorMetricSnapshot }

/** Verifies that per-actor metrics (processing-time, time-in-mailbox, mailbox-size and
  * error count) are recorded, filtered and cleaned up as configured. The tick interval
  * is set to one hour so snapshots are only taken explicitly by the tests themselves.
  */
class ActorMetricsSpec extends TestKitBase with WordSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {
  implicit lazy val system: ActorSystem = ActorSystem("actor-metrics-spec", ConfigFactory.parseString(
    """
      |kamon.metrics {
      |  tick-interval = 1 hour
      |  default-collection-context-buffer-size = 10
      |
      |  filters = [
      |    {
      |      actor {
      |        includes = [ "user/tracked-*", "user/measuring-*", "user/clean-after-collect", "user/stop" ]
      |        excludes = [ "user/tracked-explicitly-excluded"]
      |      }
      |    }
      |  ]
      |  precision.actor {
      |    processing-time {
      |      highest-trackable-value = 3600000000000
      |      significant-value-digits = 2
      |    }
      |
      |    time-in-mailbox {
      |      highest-trackable-value = 3600000000000
      |      significant-value-digits = 2
      |    }
      |
      |    mailbox-size {
      |      refresh-interval = 1 hour
      |      highest-trackable-value = 999999999
      |      significant-value-digits = 2
      |    }
      |  }
      |}
      |
      |akka.loglevel = OFF
      |
    """.stripMargin))

  "the Kamon actor metrics" should {
    "respect the configured include and exclude filters" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("tracked-actor")
      actorMetricsRecorderOf(trackedActor) should not be empty

      val nonTrackedActor = createTestActor("non-tracked-actor")
      actorMetricsRecorderOf(nonTrackedActor) shouldBe empty

      // The excludes list must win over a matching includes pattern.
      val trackedButExplicitlyExcluded = createTestActor("tracked-explicitly-excluded")
      actorMetricsRecorderOf(trackedButExplicitlyExcluded) shouldBe empty
    }

    "reset all recording instruments after taking a snapshot" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("clean-after-collect")

      for (_ ← 1 to 100) {
        for (i ← 1 to 100) {
          trackedActor ! Discard
        }
        trackedActor ! Fail
        trackedActor ! Ping
        expectMsg(Pong)

        // 100 Discard + 1 Fail + 1 Ping = 102 processed messages per iteration.
        val firstSnapshot = collectMetricsOf(trackedActor).get
        firstSnapshot.errors.count should be(1L)
        firstSnapshot.mailboxSize.numberOfMeasurements should be > 0L
        firstSnapshot.processingTime.numberOfMeasurements should be(102L) // 102 examples
        firstSnapshot.timeInMailbox.numberOfMeasurements should be(102L) // 102 examples

        val secondSnapshot = collectMetricsOf(trackedActor).get // Ensure that the recorders are clean
        secondSnapshot.errors.count should be(0L)
        secondSnapshot.mailboxSize.numberOfMeasurements should be(3L) // min, max and current
        secondSnapshot.processingTime.numberOfMeasurements should be(0L)
        secondSnapshot.timeInMailbox.numberOfMeasurements should be(0L)
      }
    }

    "record the processing-time of the receive function" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("measuring-processing-time")

      trackedActor ! TrackTimings(sleep = Some(100 millis))
      val timings = expectMsgType[TrackedTimings]
      val snapshot = collectMetricsOf(trackedActor).get

      snapshot.processingTime.numberOfMeasurements should be(1L)
      snapshot.processingTime.recordsIterator.next().count should be(1L)
      snapshot.processingTime.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
    }

    "record the number of errors" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("measuring-errors")

      for (i ← 1 to 10) { trackedActor ! Fail }
      trackedActor ! Ping
      expectMsg(Pong)
      val snapshot = collectMetricsOf(trackedActor).get

      snapshot.errors.count should be(10)
    }

    "record the mailbox-size" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("measuring-mailbox-size")

      // The first message sleeps so the following ten pile up in the mailbox.
      trackedActor ! TrackTimings(sleep = Some(100 millis))
      for (i ← 1 to 10) {
        trackedActor ! Discard
      }
      trackedActor ! Ping

      val timings = expectMsgType[TrackedTimings]
      expectMsg(Pong)
      val snapshot = collectMetricsOf(trackedActor).get

      snapshot.mailboxSize.min should be(0L)
      snapshot.mailboxSize.max should be(11L +- 1L)
    }

    "record the time-in-mailbox" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("measuring-time-in-mailbox")

      trackedActor ! TrackTimings(sleep = Some(100 millis))
      val timings = expectMsgType[TrackedTimings]
      val snapshot = collectMetricsOf(trackedActor).get

      snapshot.timeInMailbox.numberOfMeasurements should be(1L)
      snapshot.timeInMailbox.recordsIterator.next().count should be(1L)
      snapshot.timeInMailbox.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
    }

    "clean up the associated recorder when the actor is stopped" in new ActorMetricsFixtures {
      val trackedActor = createTestActor("stop")

      val deathWatcher = TestProbe()
      deathWatcher.watch(trackedActor)
      trackedActor ! PoisonPill
      deathWatcher.expectTerminated(trackedActor)

      actorMetricsRecorderOf(trackedActor) shouldBe empty
    }
  }

  override protected def afterAll(): Unit = shutdown()

  /** Helpers for looking up, collecting and creating tracked test actors. */
  trait ActorMetricsFixtures {
    val collectionContext = new CollectionContext {
      val buffer: LongBuffer = LongBuffer.allocate(10000)
    }

    /** Recorder key for an actor, e.g. "user/tracked-actor". */
    def actorRecorderName(ref: ActorRef): String = ref.path.elements.mkString("/")

    def actorMetricsRecorderOf(ref: ActorRef): Option[ActorMetricsRecorder] =
      Kamon(Metrics)(system).storage.get(ActorMetrics(actorRecorderName(ref))).map(_.asInstanceOf[ActorMetricsRecorder])

    def collectMetricsOf(ref: ActorRef): Option[ActorMetricSnapshot] = {
      Thread.sleep(5) // Just in case the test advances a bit faster than the actor being tested.
      actorMetricsRecorderOf(ref).map(_.collect(collectionContext))
    }

    def createTestActor(name: String): ActorRef = {
      val actor = system.actorOf(Props[ActorMetricsTestActor], name)
      val initialiseListener = TestProbe()

      // Ensure that the actor has been created before returning.
      // (Fixed comment: it previously said "router", copied from the router spec.)
      actor.tell(Ping, initialiseListener.ref)
      initialiseListener.expectMsg(Pong)

      // Cleanup all the metric recording instruments:
      collectMetricsOf(actor)

      actor
    }
  }
}

/** Test subject: discards, fails, replies to pings, and reports message timings. */
class ActorMetricsTestActor extends Actor {
  def receive = {
    case Discard ⇒
    case Fail    ⇒ throw new ArithmeticException("Division by zero.")
    case Ping    ⇒ sender ! Pong
    case TrackTimings(sendTimestamp, sleep) ⇒ {
      val dequeueTimestamp = System.nanoTime()
      // foreach, not map: the sleep is performed purely for its side effect.
      sleep.foreach(s ⇒ Thread.sleep(s.toMillis))
      val afterReceiveTimestamp = System.nanoTime()

      sender ! TrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp)
    }
  }
}

object ActorMetricsTestActor {
  case object Ping
  case object Pong
  case object Fail
  case object Discard

  case class TrackTimings(sendTimestamp: Long = System.nanoTime(), sleep: Option[Duration] = None)
  case class TrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) {
    def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp
    def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp
  }
}
package kamon.metric

import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.testkit.{ TestKitBase, TestProbe }
import com.typesafe.config.ConfigFactory
import kamon.Kamon
import kamon.akka.DispatcherMetrics
import DispatcherMetrics.DispatcherMetricSnapshot
import kamon.metric.Subscriptions.TickMetricSnapshot
import org.scalatest.{ Matchers, WordSpecLike }

import scala.concurrent.duration._

/** Verifies that dispatcher metrics are collected for tracked dispatchers only and
  * that pool-related gauges (pool size, running threads, queued tasks) are reported
  * on every tick (tick-interval is one second here).
  */
class DispatcherMetricsSpec extends TestKitBase with WordSpecLike with Matchers {
  implicit lazy val system: ActorSystem = ActorSystem("dispatcher-metrics-spec", ConfigFactory.parseString(
    """
      |kamon.metrics {
      |  tick-interval = 1 second
      |  default-collection-context-buffer-size = 10
      |
      |  filters = [
      |    {
      |      dispatcher {
      |        includes = ["*"]
      |        excludes = ["dispatcher-explicitly-excluded"]
      |      }
      |    }
      |  ]
      |}
      |
      |dispatcher-explicitly-excluded {
      |  type = "Dispatcher"
      |  executor = "fork-join-executor"
      |}
      |
      |tracked-dispatcher {
      |  type = "Dispatcher"
      |  executor = "thread-pool-executor"
      |}
      |
    """.stripMargin))

  "the Kamon dispatcher metrics" should {
    "respect the configured include and exclude filters" in {
      system.actorOf(Props[ActorMetricsTestActor].withDispatcher("tracked-dispatcher"), "actor-with-tracked-dispatcher")
      system.actorOf(Props[ActorMetricsTestActor].withDispatcher("dispatcher-explicitly-excluded"), "actor-with-excluded-dispatcher")

      Kamon(Metrics).subscribe(DispatcherMetrics, "*", testActor, permanently = true)
      expectMsgType[TickMetricSnapshot]

      within(2 seconds) {
        val tickSnapshot = expectMsgType[TickMetricSnapshot]
        tickSnapshot.metrics.keys should contain(DispatcherMetrics("tracked-dispatcher"))
        tickSnapshot.metrics.keys should not contain (DispatcherMetrics("dispatcher-explicitly-excluded"))
      }
    }

    "record maximumPoolSize, runningThreadCount, queueTaskCount, poolSize metrics" in new DelayableActorFixture {
      val (delayable, metricsListener) = delayableActor("worker-actor", "tracked-dispatcher")

      // NOTE(review): the original had `for (_ ← 1 to 100) { }` whose only body line
      // (`delayable ! Discard`) was commented out — a no-op loop, removed here. The
      // assertions below only check idle-pool bounds, so no messages are needed.
      val dispatcherMetrics = expectDispatcherMetrics("tracked-dispatcher", metricsListener, 3 seconds)
      dispatcherMetrics.maximumPoolSize.max should be <= 64L //fail in travis
      dispatcherMetrics.poolSize.max should be <= 22L //fail in travis
      dispatcherMetrics.queueTaskCount.max should be(0L)
      dispatcherMetrics.runningThreadCount.max should be(0L)
    }

  }

  /** Waits up to `waitTime` for a tick and extracts the snapshot for `dispatcherId`,
    * failing the test if that dispatcher is absent from the tick.
    */
  def expectDispatcherMetrics(dispatcherId: String, listener: TestProbe, waitTime: FiniteDuration): DispatcherMetricSnapshot = {
    val tickSnapshot = within(waitTime) {
      listener.expectMsgType[TickMetricSnapshot]
    }
    val dispatcherMetricsOption = tickSnapshot.metrics.get(DispatcherMetrics(dispatcherId))
    dispatcherMetricsOption should not be empty
    dispatcherMetricsOption.get.asInstanceOf[DispatcherMetricSnapshot]
  }

  /** Creates an actor on the given dispatcher plus a probe already subscribed to
    * dispatcher metrics (with one warm-up tick consumed).
    */
  trait DelayableActorFixture {
    def delayableActor(name: String, dispatcher: String): (ActorRef, TestProbe) = {
      val actor = system.actorOf(Props[ActorMetricsTestActor].withDispatcher(dispatcher), name)
      val metricsListener = TestProbe()

      Kamon(Metrics).subscribe(DispatcherMetrics, "*", metricsListener.ref, permanently = true)
      // Wait for one empty snapshot before proceeding to the test.
      metricsListener.expectMsgType[TickMetricSnapshot]

      (actor, metricsListener)
    }
  }
}
package kamon.metric

import java.nio.LongBuffer

import akka.actor._
import akka.kamon.instrumentation.ActorCellMetrics
import akka.routing._
import akka.testkit.{ ImplicitSender, TestKitBase, TestProbe }
import com.typesafe.config.ConfigFactory
import kamon.Kamon
import kamon.akka.{ RouterMetrics, ActorMetrics }
import ActorMetrics.{ ActorMetricSnapshot, ActorMetricsRecorder }
import RouterMetrics._
import kamon.metric.RouterMetricsTestActor._
import kamon.metric.Subscriptions.TickMetricSnapshot
import kamon.metric.instrument.{ Counter, Histogram }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.duration._

/** Verifies router metrics (routing-time, processing-time, time-in-mailbox, errors and
  * cleanup) for both pool and group routers. Tick interval is one hour so snapshots
  * are only taken explicitly by the tests.
  */
class RouterMetricsSpec extends TestKitBase with WordSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {
  implicit lazy val system: ActorSystem = ActorSystem("router-metrics-spec", ConfigFactory.parseString(
    """
      |kamon.metrics {
      |  tick-interval = 1 hour
      |  default-collection-context-buffer-size = 10
      |
      |  filters = [
      |    {
      |      router {
      |        includes = [ "user/tracked-*", "user/measuring-*", "user/stop-*" ]
      |        excludes = [ "user/tracked-explicitly-excluded-*"]
      |      }
      |    }
      |  ]
      |  precision {
      |    default-histogram-precision {
      |      highest-trackable-value = 3600000000000
      |      significant-value-digits = 2
      |    }
      |  }
      |}
      |
      |akka.loglevel = OFF
      |
    """.stripMargin))

  "the Kamon router metrics" should {
    "respect the configured include and exclude filters" in new RouterMetricsFixtures {
      createTestPoolRouter("tracked-pool-router")
      createTestGroupRouter("tracked-group-router")
      createTestPoolRouter("non-tracked-pool-router")
      createTestGroupRouter("non-tracked-group-router")
      createTestPoolRouter("tracked-explicitly-excluded-pool-router")
      createTestGroupRouter("tracked-explicitly-excluded-group-router")

      routerMetricsRecorderOf("user/tracked-pool-router") should not be empty
      routerMetricsRecorderOf("user/tracked-group-router") should not be empty
      routerMetricsRecorderOf("user/non-tracked-pool-router") shouldBe empty
      routerMetricsRecorderOf("user/non-tracked-group-router") shouldBe empty
      routerMetricsRecorderOf("user/tracked-explicitly-excluded-pool-router") shouldBe empty
      routerMetricsRecorderOf("user/tracked-explicitly-excluded-group-router") shouldBe empty
    }

    "record the routing-time of the receive function for pool routers" in new RouterMetricsFixtures {
      val listener = TestProbe()
      val router = createTestPoolRouter("measuring-routing-time-in-pool-router")

      router.tell(Ping, listener.ref)
      listener.expectMsg(Pong)
      val routerSnapshot = collectMetricsOf("user/measuring-routing-time-in-pool-router").get

      routerSnapshot.routingTime.numberOfMeasurements should be(1L)
    }

    "record the routing-time of the receive function for group routers" in new RouterMetricsFixtures {
      val listener = TestProbe()
      val router = createTestGroupRouter("measuring-routing-time-in-group-router")

      router.tell(Ping, listener.ref)
      listener.expectMsg(Pong)
      val routerSnapshot = collectMetricsOf("user/measuring-routing-time-in-group-router").get

      routerSnapshot.routingTime.numberOfMeasurements should be(1L)
    }

    "record the processing-time of the receive function for pool routers" in new RouterMetricsFixtures {
      val timingsListener = TestProbe()
      val router = createTestPoolRouter("measuring-processing-time-in-pool-router")

      router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
      val timings = timingsListener.expectMsgType[RouterTrackedTimings]
      val routerSnapshot = collectMetricsOf("user/measuring-processing-time-in-pool-router").get

      routerSnapshot.processingTime.numberOfMeasurements should be(1L)
      routerSnapshot.processingTime.recordsIterator.next().count should be(1L)
      routerSnapshot.processingTime.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
    }

    "record the processing-time of the receive function for group routers" in new RouterMetricsFixtures {
      val timingsListener = TestProbe()
      val router = createTestGroupRouter("measuring-processing-time-in-group-router")

      router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
      val timings = timingsListener.expectMsgType[RouterTrackedTimings]
      val routerSnapshot = collectMetricsOf("user/measuring-processing-time-in-group-router").get

      routerSnapshot.processingTime.numberOfMeasurements should be(1L)
      routerSnapshot.processingTime.recordsIterator.next().count should be(1L)
      routerSnapshot.processingTime.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
    }

    "record the number of errors for pool routers" in new RouterMetricsFixtures {
      val listener = TestProbe()
      val router = createTestPoolRouter("measuring-errors-in-pool-router")

      for (i ← 1 to 10) {
        router.tell(Fail, listener.ref)
      }

      router.tell(Ping, listener.ref)
      listener.expectMsg(Pong)

      val routerSnapshot = collectMetricsOf("user/measuring-errors-in-pool-router").get
      routerSnapshot.errors.count should be(10L)
    }

    "record the number of errors for group routers" in new RouterMetricsFixtures {
      val listener = TestProbe()
      val router = createTestGroupRouter("measuring-errors-in-group-router")

      for (i ← 1 to 10) {
        router.tell(Fail, listener.ref)
      }

      router.tell(Ping, listener.ref)
      listener.expectMsg(Pong)

      val routerSnapshot = collectMetricsOf("user/measuring-errors-in-group-router").get
      routerSnapshot.errors.count should be(10L)
    }

    "record the time-in-mailbox for pool routers" in new RouterMetricsFixtures {
      val timingsListener = TestProbe()
      val router = createTestPoolRouter("measuring-time-in-mailbox-in-pool-router")

      router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
      val timings = timingsListener.expectMsgType[RouterTrackedTimings]
      val routerSnapshot = collectMetricsOf("user/measuring-time-in-mailbox-in-pool-router").get

      routerSnapshot.timeInMailbox.numberOfMeasurements should be(1L)
      routerSnapshot.timeInMailbox.recordsIterator.next().count should be(1L)
      routerSnapshot.timeInMailbox.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
    }

    "record the time-in-mailbox for group routers" in new RouterMetricsFixtures {
      val timingsListener = TestProbe()
      val router = createTestGroupRouter("measuring-time-in-mailbox-in-group-router")

      router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
      val timings = timingsListener.expectMsgType[RouterTrackedTimings]
      val routerSnapshot = collectMetricsOf("user/measuring-time-in-mailbox-in-group-router").get

      routerSnapshot.timeInMailbox.numberOfMeasurements should be(1L)
      routerSnapshot.timeInMailbox.recordsIterator.next().count should be(1L)
      routerSnapshot.timeInMailbox.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
    }

    "clean up the associated recorder when the pool router is stopped" in new RouterMetricsFixtures {
      val trackedRouter = createTestPoolRouter("stop-in-pool-router")
      routerMetricsRecorderOf("user/stop-in-pool-router") should not be empty

      val deathWatcher = TestProbe()
      deathWatcher.watch(trackedRouter)
      trackedRouter ! PoisonPill
      deathWatcher.expectTerminated(trackedRouter)

      routerMetricsRecorderOf("user/stop-in-pool-router") shouldBe empty
    }

    "clean up the associated recorder when the group router is stopped" in new RouterMetricsFixtures {
      // BUG FIX: this test previously created a POOL router (copy-paste from the test
      // above), so the group-router cleanup path was never actually exercised.
      val trackedRouter = createTestGroupRouter("stop-in-group-router")
      routerMetricsRecorderOf("user/stop-in-group-router") should not be empty

      val deathWatcher = TestProbe()
      deathWatcher.watch(trackedRouter)
      trackedRouter ! PoisonPill
      deathWatcher.expectTerminated(trackedRouter)

      routerMetricsRecorderOf("user/stop-in-group-router") shouldBe empty
    }
  }

  override protected def afterAll(): Unit = shutdown()

  /** Helpers for looking up recorders and creating tracked pool/group routers. */
  trait RouterMetricsFixtures {
    val collectionContext = new CollectionContext {
      val buffer: LongBuffer = LongBuffer.allocate(10000)
    }

    def routerMetricsRecorderOf(routerName: String): Option[RouterMetricsRecorder] =
      Kamon(Metrics)(system).storage.get(RouterMetrics(routerName)).map(_.asInstanceOf[RouterMetricsRecorder])

    def collectMetricsOf(routerName: String): Option[RouterMetricSnapshot] = {
      Thread.sleep(5) // Just in case the test advances a bit faster than the actor being tested.
      routerMetricsRecorderOf(routerName).map(_.collect(collectionContext))
    }

    def createTestGroupRouter(routerName: String): ActorRef = {
      val routees = Vector.fill(5) {
        system.actorOf(Props[RouterMetricsTestActor])
      }

      val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), routerName)

      val initialiseListener = TestProbe()

      // Ensure that the router has been created before returning.
      group.tell(Ping, initialiseListener.ref)
      initialiseListener.expectMsg(Pong)

      // Cleanup all the metric recording instruments:
      collectMetricsOf("user/" + routerName)

      group
    }

    def createTestPoolRouter(routerName: String): ActorRef = {
      val router = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), routerName)
      val initialiseListener = TestProbe()

      // Ensure that the router has been created before returning.
      router.tell(Ping, initialiseListener.ref)
      initialiseListener.expectMsg(Pong)

      // Cleanup all the metric recording instruments:
      collectMetricsOf("user/" + routerName)

      router
    }
  }

  // NOTE(review): this trait appears to be unused within this spec (all tests use
  // RouterMetricsFixtures); kept for source compatibility — candidate for removal.
  trait ActorMetricsFixtures {
    val collectionContext = new CollectionContext {
      val buffer: LongBuffer = LongBuffer.allocate(10000)
    }

    def createTestActor(name: String): ActorRef = system.actorOf(Props[ActorMetricsTestActor], name)

    def takeSnapshotOf(amr: ActorMetricsRecorder): ActorMetricSnapshot = amr.collect(collectionContext)
  }
}

/** Test routee: discards, fails, replies to pings, and reports message timings. */
class RouterMetricsTestActor extends Actor {
  def receive = {
    case Discard ⇒
    case Fail    ⇒ throw new ArithmeticException("Division by zero.")
    case Ping    ⇒ sender ! Pong
    case RouterTrackTimings(sendTimestamp, sleep) ⇒ {
      val dequeueTimestamp = System.nanoTime()
      // foreach, not map: the sleep is performed purely for its side effect.
      sleep.foreach(s ⇒ Thread.sleep(s.toMillis))
      val afterReceiveTimestamp = System.nanoTime()

      sender ! RouterTrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp)
    }
  }
}

object RouterMetricsTestActor {
  case object Ping
  case object Pong
  case object Fail
  case object Discard

  case class RouterTrackTimings(sendTimestamp: Long = System.nanoTime(), sleep: Option[Duration] = None)
  case class RouterTrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) {
    def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp
    def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp
  }
}