Diffstat (limited to 'kamon-akka/src/test/scala/kamon/akka')
-rw-r--r--  kamon-akka/src/test/scala/kamon/akka/ActorMetricsSpec.scala  217
-rw-r--r--  kamon-akka/src/test/scala/kamon/akka/DispatcherMetricsSpec.scala  211
-rw-r--r--  kamon-akka/src/test/scala/kamon/akka/RouterMetricsSpec.scala  277
-rw-r--r--  kamon-akka/src/test/scala/kamon/akka/instrumentation/AskPatternInstrumentationSpec.scala  2
4 files changed, 706 insertions, 1 deletion
diff --git a/kamon-akka/src/test/scala/kamon/akka/ActorMetricsSpec.scala b/kamon-akka/src/test/scala/kamon/akka/ActorMetricsSpec.scala
new file mode 100644
index 00000000..19a71053
--- /dev/null
+++ b/kamon-akka/src/test/scala/kamon/akka/ActorMetricsSpec.scala
@@ -0,0 +1,217 @@
+/* =========================================================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.akka
+
+import java.nio.LongBuffer
+
+import akka.actor._
+import akka.testkit.TestProbe
+import com.typesafe.config.ConfigFactory
+import kamon.Kamon
+import kamon.akka.ActorMetricsTestActor._
+import kamon.metric.EntitySnapshot
+import kamon.metric.instrument.CollectionContext
+import kamon.testkit.BaseKamonSpec
+
+import scala.concurrent.duration._
+
+class ActorMetricsSpec extends BaseKamonSpec("actor-metrics-spec") {
+ override lazy val config =
+ ConfigFactory.parseString(
+ """
+ |kamon.metric {
+ | tick-interval = 1 hour
+ | default-collection-context-buffer-size = 10
+ |
+ | filters {
+ | akka-actor {
+ | includes = [ "user/tracked-*", "user/measuring-*", "user/clean-after-collect", "user/stop" ]
+ | excludes = [ "user/tracked-explicitly-excluded", "user/non-tracked-actor" ]
+ | }
+ | }
+ |
+ | instrument-settings {
+ | akka-actor.mailbox-size.refresh-interval = 1 hour
+ | }
+ |}
+ |
+ |akka.loglevel = OFF
+ |
+ """.stripMargin)
+
+ "the Kamon actor metrics" should {
+ "respect the configured include and exclude filters" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("tracked-actor")
+ actorMetricsRecorderOf(trackedActor) should not be empty
+
+ val nonTrackedActor = createTestActor("non-tracked-actor")
+ actorMetricsRecorderOf(nonTrackedActor) shouldBe empty
+
+ val trackedButExplicitlyExcluded = createTestActor("tracked-explicitly-excluded")
+ actorMetricsRecorderOf(trackedButExplicitlyExcluded) shouldBe empty
+ }
+
+ "reset all recording instruments after taking a snapshot" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("clean-after-collect")
+
+ for (_ ← 1 to 100) {
+ for (i ← 1 to 100) {
+ trackedActor ! Discard
+ }
+ trackedActor ! Fail
+ trackedActor ! Ping
+ expectMsg(Pong)
+
+ val firstSnapshot = collectMetricsOf(trackedActor).get
+ firstSnapshot.counter("errors").get.count should be(1L)
+ firstSnapshot.minMaxCounter("mailbox-size").get.numberOfMeasurements should be > 0L
+ firstSnapshot.histogram("processing-time").get.numberOfMeasurements should be(102L) // 102 examples
+ firstSnapshot.histogram("time-in-mailbox").get.numberOfMeasurements should be(102L) // 102 examples
+
+ val secondSnapshot = collectMetricsOf(trackedActor).get // Ensure that the recorders are clean
+ secondSnapshot.counter("errors").get.count should be(0L)
+ secondSnapshot.minMaxCounter("mailbox-size").get.numberOfMeasurements should be(3L) // min, max and current
+ secondSnapshot.histogram("processing-time").get.numberOfMeasurements should be(0L)
+ secondSnapshot.histogram("time-in-mailbox").get.numberOfMeasurements should be(0L)
+ }
+ }
+
+ "record the processing-time of the receive function" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("measuring-processing-time")
+
+ trackedActor ! TrackTimings(sleep = Some(100 millis))
+ val timings = expectMsgType[TrackedTimings]
+ val snapshot = collectMetricsOf(trackedActor).get
+
+ snapshot.histogram("processing-time").get.numberOfMeasurements should be(1L)
+ snapshot.histogram("processing-time").get.recordsIterator.next().count should be(1L)
+ snapshot.histogram("processing-time").get.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
+ }
+
+ "record the number of errors" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("measuring-errors")
+
+ for (i ← 1 to 10) { trackedActor ! Fail }
+ trackedActor ! Ping
+ expectMsg(Pong)
+ val snapshot = collectMetricsOf(trackedActor).get
+
+ snapshot.counter("errors").get.count should be(10)
+ }
+
+ "record the mailbox-size" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("measuring-mailbox-size")
+
+ trackedActor ! TrackTimings(sleep = Some(100 millis))
+ for (i ← 1 to 10) {
+ trackedActor ! Discard
+ }
+ trackedActor ! Ping
+
+ val timings = expectMsgType[TrackedTimings]
+ expectMsg(Pong)
+ val snapshot = collectMetricsOf(trackedActor).get
+
+ snapshot.minMaxCounter("mailbox-size").get.min should be(0L)
+ snapshot.minMaxCounter("mailbox-size").get.max should be(11L +- 1L)
+ }
+
+ "record the time-in-mailbox" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("measuring-time-in-mailbox")
+
+ trackedActor ! TrackTimings(sleep = Some(100 millis))
+ val timings = expectMsgType[TrackedTimings]
+ val snapshot = collectMetricsOf(trackedActor).get
+
+ snapshot.histogram("time-in-mailbox").get.numberOfMeasurements should be(1L)
+ snapshot.histogram("time-in-mailbox").get.recordsIterator.next().count should be(1L)
+ snapshot.histogram("time-in-mailbox").get.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
+ }
+
+ "clean up the associated recorder when the actor is stopped" in new ActorMetricsFixtures {
+ val trackedActor = createTestActor("stop")
+ val firstRecorder = actorMetricsRecorderOf(trackedActor).get
+
+ // Killing the actor should remove its ActorMetrics, and registering it again below should create a new one.
+ val deathWatcher = TestProbe()
+ deathWatcher.watch(trackedActor)
+ trackedActor ! PoisonPill
+ deathWatcher.expectTerminated(trackedActor)
+
+ actorMetricsRecorderOf(trackedActor).get shouldNot be theSameInstanceAs (firstRecorder)
+ }
+ }
+
+ override protected def afterAll(): Unit = shutdown()
+
+ trait ActorMetricsFixtures {
+ val collectionContext = new CollectionContext {
+ val buffer: LongBuffer = LongBuffer.allocate(10000)
+ }
+
+ def actorRecorderName(ref: ActorRef): String = ref.path.elements.mkString("/")
+
+ def actorMetricsRecorderOf(ref: ActorRef): Option[ActorMetrics] =
+ Kamon.metrics.register(ActorMetrics, actorRecorderName(ref)).map(_.recorder)
+
+ def collectMetricsOf(ref: ActorRef): Option[EntitySnapshot] = {
+ Thread.sleep(5) // Just in case the test advances a bit faster than the actor being tested.
+ actorMetricsRecorderOf(ref).map(_.collect(collectionContext))
+ }
+
+ def createTestActor(name: String): ActorRef = {
+ val actor = system.actorOf(Props[ActorMetricsTestActor], name)
+ val initialiseListener = TestProbe()
+
+ // Ensure that the actor has been created before returning.
+ actor.tell(Ping, initialiseListener.ref)
+ initialiseListener.expectMsg(Pong)
+
+ // Cleanup all the metric recording instruments:
+ collectMetricsOf(actor)
+
+ actor
+ }
+ }
+}
+
+class ActorMetricsTestActor extends Actor {
+ def receive = {
+ case Discard ⇒
+ case Fail ⇒ throw new ArithmeticException("Division by zero.")
+ case Ping ⇒ sender ! Pong
+ case TrackTimings(sendTimestamp, sleep) ⇒ {
+ val dequeueTimestamp = System.nanoTime()
+ sleep.map(s ⇒ Thread.sleep(s.toMillis))
+ val afterReceiveTimestamp = System.nanoTime()
+
+ sender ! TrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp)
+ }
+ }
+}
+
+object ActorMetricsTestActor {
+ case object Ping
+ case object Pong
+ case object Fail
+ case object Discard
+
+ case class TrackTimings(sendTimestamp: Long = System.nanoTime(), sleep: Option[Duration] = None)
+ case class TrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) {
+ def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp
+ def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp
+ }
+}
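For reference, the TrackTimings/TrackedTimings round trip above splits a message's latency into two intervals derived from three nanosecond timestamps. A minimal standalone sketch of that arithmetic (the object name and the sleeps are illustrative, not part of the spec):

// Sketch: the latency split recorded by the actor metrics above.
object TimingArithmeticSketch extends App {
  val sendTimestamp = System.nanoTime()          // message is sent
  Thread.sleep(5)                                // message waits in the mailbox
  val dequeueTimestamp = System.nanoTime()       // actor starts processing it
  Thread.sleep(10)                               // receive body runs
  val afterReceiveTimestamp = System.nanoTime()  // receive returns

  val timeInMailbox = dequeueTimestamp - sendTimestamp           // feeds the "time-in-mailbox" histogram
  val processingTime = afterReceiveTimestamp - dequeueTimestamp  // feeds the "processing-time" histogram
  println(s"time-in-mailbox=${timeInMailbox}ns, processing-time=${processingTime}ns")
}
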
diff --git a/kamon-akka/src/test/scala/kamon/akka/DispatcherMetricsSpec.scala b/kamon-akka/src/test/scala/kamon/akka/DispatcherMetricsSpec.scala
new file mode 100644
index 00000000..209b5cf5
--- /dev/null
+++ b/kamon-akka/src/test/scala/kamon/akka/DispatcherMetricsSpec.scala
@@ -0,0 +1,211 @@
+/* =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.akka
+
+import java.nio.LongBuffer
+
+import akka.actor.{ ActorRef, ActorSystem }
+import akka.dispatch.MessageDispatcher
+import akka.testkit.{ TestKitBase, TestProbe }
+import com.typesafe.config.ConfigFactory
+import kamon.Kamon
+import kamon.metric.instrument.CollectionContext
+import kamon.metric.{ EntityRecorder, EntitySnapshot }
+import kamon.testkit.BaseKamonSpec
+import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }
+
+import scala.concurrent.duration._
+import scala.concurrent.{ Await, Future }
+
+class DispatcherMetricsSpec extends BaseKamonSpec("dispatcher-metrics-spec") {
+ override lazy val config =
+ ConfigFactory.parseString(
+ """
+ |kamon.metric {
+ | tick-interval = 1 hour
+ | default-collection-context-buffer-size = 10
+ |
+ | filters = {
+ | akka-dispatcher {
+ | includes = [ "*" ]
+ | excludes = [ "explicitly-excluded" ]
+ | }
+ | }
+ |
+ | default-instrument-settings {
+ | gauge.refresh-interval = 1 hour
+ | min-max-counter.refresh-interval = 1 hour
+ | }
+ |}
+ |
+ |explicitly-excluded {
+ | type = "Dispatcher"
+ | executor = "fork-join-executor"
+ |}
+ |
+ |tracked-fjp {
+ | type = "Dispatcher"
+ | executor = "fork-join-executor"
+ |
+ | fork-join-executor {
+ | parallelism-min = 8
+ | parallelism-factor = 100.0
+ | parallelism-max = 22
+ | }
+ |}
+ |
+ |tracked-tpe {
+ | type = "Dispatcher"
+ | executor = "thread-pool-executor"
+ |
+ | thread-pool-executor {
+ | core-pool-size-min = 7
+ | core-pool-size-factor = 100.0
+ | max-pool-size-factor = 100.0
+ | max-pool-size-max = 21
+ | }
+ |}
+ |
+ """.stripMargin)
+
+ "the Kamon dispatcher metrics" should {
+ "respect the configured include and exclude filters" in {
+ val defaultDispatcher = forceInit(system.dispatchers.lookup("akka.actor.default-dispatcher"))
+ val fjpDispatcher = forceInit(system.dispatchers.lookup("tracked-fjp"))
+ val tpeDispatcher = forceInit(system.dispatchers.lookup("tracked-tpe"))
+ val excludedDispatcher = forceInit(system.dispatchers.lookup("explicitly-excluded"))
+
+ findDispatcherRecorder(defaultDispatcher) shouldNot be(empty)
+ findDispatcherRecorder(fjpDispatcher) shouldNot be(empty)
+ findDispatcherRecorder(tpeDispatcher) shouldNot be(empty)
+ findDispatcherRecorder(excludedDispatcher) should be(empty)
+ }
+
+ "record metrics for a dispatcher with thread-pool-executor" in {
+ implicit val tpeDispatcher = system.dispatchers.lookup("tracked-tpe")
+ refreshDispatcherInstruments(tpeDispatcher)
+ collectDispatcherMetrics(tpeDispatcher)
+
+ Await.result({
+ Future.sequence {
+ for (_ ← 1 to 100) yield submit(tpeDispatcher)
+ }
+ }, 5 seconds)
+
+ refreshDispatcherInstruments(tpeDispatcher)
+ val snapshot = collectDispatcherMetrics(tpeDispatcher)
+
+ snapshot.gauge("active-threads") should not be empty
+ snapshot.gauge("pool-size").get.min should be >= 7L
+ snapshot.gauge("pool-size").get.max should be <= 21L
+ snapshot.gauge("max-pool-size").get.max should be(21)
+ snapshot.gauge("core-pool-size").get.max should be(21)
+ snapshot.gauge("processed-tasks").get.max should be(102L +- 5L)
+
+ // The processed tasks should be reset to 0 if no more tasks are submitted.
+ val secondSnapshot = collectDispatcherMetrics(tpeDispatcher)
+ secondSnapshot.gauge("processed-tasks").get.max should be(0)
+ }
+
+ "record metrics for a dispatcher with fork-join-executor" in {
+ implicit val fjpDispatcher = system.dispatchers.lookup("tracked-fjp")
+ collectDispatcherMetrics(fjpDispatcher)
+
+ Await.result({
+ Future.sequence {
+ for (_ ← 1 to 100) yield submit(fjpDispatcher)
+ }
+ }, 5 seconds)
+
+ refreshDispatcherInstruments(fjpDispatcher)
+ val snapshot = collectDispatcherMetrics(fjpDispatcher)
+
+ snapshot.minMaxCounter("parallelism").get.max should be(22)
+ snapshot.gauge("pool-size").get.min should be >= 0L
+ snapshot.gauge("pool-size").get.max should be <= 22L
+ snapshot.gauge("active-threads").get.max should be >= 0L
+ snapshot.gauge("running-threads").get.max should be >= 0L
+ snapshot.gauge("queued-task-count").get.max should be(0)
+
+ }
+
+ "clean up the metrics recorders after a dispatcher is shut down" in {
+ implicit val tpeDispatcher = system.dispatchers.lookup("tracked-tpe")
+ implicit val fjpDispatcher = system.dispatchers.lookup("tracked-fjp")
+
+ findDispatcherRecorder(fjpDispatcher) shouldNot be(empty)
+ findDispatcherRecorder(tpeDispatcher) shouldNot be(empty)
+
+ shutdownDispatcher(tpeDispatcher)
+ shutdownDispatcher(fjpDispatcher)
+
+ findDispatcherRecorder(fjpDispatcher) should be(empty)
+ findDispatcherRecorder(tpeDispatcher) should be(empty)
+ }
+
+ }
+
+ def actorRecorderName(ref: ActorRef): String = ref.path.elements.mkString("/")
+
+ def findDispatcherRecorder(dispatcher: MessageDispatcher): Option[EntityRecorder] =
+ Kamon.metrics.find(dispatcher.id, "akka-dispatcher")
+
+ def collectDispatcherMetrics(dispatcher: MessageDispatcher): EntitySnapshot =
+ findDispatcherRecorder(dispatcher).map(_.collect(collectionContext)).get
+
+ def refreshDispatcherInstruments(dispatcher: MessageDispatcher): Unit = {
+ findDispatcherRecorder(dispatcher) match {
+ case Some(tpe: ThreadPoolExecutorDispatcherMetrics) ⇒
+ tpe.processedTasks.refreshValue()
+ tpe.activeThreads.refreshValue()
+ tpe.maxPoolSize.refreshValue()
+ tpe.poolSize.refreshValue()
+ tpe.corePoolSize.refreshValue()
+
+ case Some(fjp: ForkJoinPoolDispatcherMetrics) ⇒
+ fjp.activeThreads.refreshValue()
+ fjp.poolSize.refreshValue()
+ fjp.queuedTaskCount.refreshValue()
+ fjp.paralellism.refreshValues()
+ fjp.runningThreads.refreshValue()
+
+ case other ⇒
+ }
+ }
+
+ def forceInit(dispatcher: MessageDispatcher): MessageDispatcher = {
+ val listener = TestProbe()
+ Future {
+ listener.ref ! "init done"
+ }(dispatcher)
+ listener.expectMsg("init done")
+
+ dispatcher
+ }
+
+ def submit(dispatcher: MessageDispatcher): Future[String] = Future {
+ "hello"
+ }(dispatcher)
+
+ def shutdownDispatcher(dispatcher: MessageDispatcher): Unit = {
+ val shutdownMethod = dispatcher.getClass.getDeclaredMethod("shutdown")
+ shutdownMethod.setAccessible(true)
+ shutdownMethod.invoke(dispatcher)
+ }
+
+ override protected def afterAll(): Unit = system.shutdown()
+}
+
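The forceInit and submit helpers above work because a MessageDispatcher is itself an ExecutionContext, so exercising a tracked dispatcher only requires running Futures on it. A minimal sketch under that assumption; the object name, system name and inline config are illustrative, with the tracked-fjp block copied from the spec above:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._

object DispatcherUsageSketch extends App {
  // Minimal config containing only the tracked dispatcher used in the spec.
  val config = ConfigFactory.parseString(
    """
      |tracked-fjp {
      |  type = "Dispatcher"
      |  executor = "fork-join-executor"
      |}
    """.stripMargin).withFallback(ConfigFactory.load())

  val system = ActorSystem("dispatcher-usage-sketch", config)

  // The looked-up dispatcher doubles as the ExecutionContext, so every Future
  // below runs on the tracked fork-join pool and is visible to its instruments.
  implicit val fjp = system.dispatchers.lookup("tracked-fjp")

  val work = Future.sequence(for (_ ← 1 to 100) yield Future("hello"))
  Await.result(work, 5.seconds)

  system.shutdown()
}
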
diff --git a/kamon-akka/src/test/scala/kamon/akka/RouterMetricsSpec.scala b/kamon-akka/src/test/scala/kamon/akka/RouterMetricsSpec.scala
new file mode 100644
index 00000000..ec55648b
--- /dev/null
+++ b/kamon-akka/src/test/scala/kamon/akka/RouterMetricsSpec.scala
@@ -0,0 +1,277 @@
+/* =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.akka
+
+import java.nio.LongBuffer
+
+import akka.actor._
+import akka.routing._
+import akka.testkit.TestProbe
+import com.typesafe.config.ConfigFactory
+import kamon.Kamon
+import kamon.akka.RouterMetricsTestActor._
+import kamon.metric.EntitySnapshot
+import kamon.metric.instrument.CollectionContext
+import kamon.testkit.BaseKamonSpec
+
+import scala.concurrent.duration._
+
+class RouterMetricsSpec extends BaseKamonSpec("router-metrics-spec") {
+ override lazy val config =
+ ConfigFactory.parseString(
+ """
+ |kamon.metric {
+ | tick-interval = 1 hour
+ | default-collection-context-buffer-size = 10
+ |
+ | filters = {
+ | akka-router {
+ | includes = [ "user/tracked-*", "user/measuring-*", "user/stop-*" ]
+ | excludes = [ "user/tracked-explicitly-excluded-*"]
+ | }
+ | }
+ |}
+ |
+ |akka.loglevel = OFF
+ |
+ """.stripMargin)
+
+ "the Kamon router metrics" should {
+ "respect the configured include and exclude filters" in new RouterMetricsFixtures {
+ createTestPoolRouter("tracked-pool-router")
+ createTestGroupRouter("tracked-group-router")
+ createTestPoolRouter("non-tracked-pool-router")
+ createTestGroupRouter("non-tracked-group-router")
+ createTestPoolRouter("tracked-explicitly-excluded-pool-router")
+ createTestGroupRouter("tracked-explicitly-excluded-group-router")
+
+ routerMetricsRecorderOf("user/tracked-pool-router") should not be empty
+ routerMetricsRecorderOf("user/tracked-group-router") should not be empty
+ routerMetricsRecorderOf("user/non-tracked-pool-router") shouldBe empty
+ routerMetricsRecorderOf("user/non-tracked-group-router") shouldBe empty
+ routerMetricsRecorderOf("user/tracked-explicitly-excluded-pool-router") shouldBe empty
+ routerMetricsRecorderOf("user/tracked-explicitly-excluded-group-router") shouldBe empty
+ }
+
+ "record the routing-time of the receive function for pool routers" in new RouterMetricsFixtures {
+ val listener = TestProbe()
+ val router = createTestPoolRouter("measuring-routing-time-in-pool-router")
+
+ router.tell(Ping, listener.ref)
+ listener.expectMsg(Pong)
+ val routerSnapshot = collectMetricsOf("user/measuring-routing-time-in-pool-router").get
+
+ routerSnapshot.histogram("routing-time").get.numberOfMeasurements should be(1L)
+ }
+
+ "record the routing-time of the receive function for group routers" in new RouterMetricsFixtures {
+ val listener = TestProbe()
+ val router = createTestGroupRouter("measuring-routing-time-in-group-router")
+
+ router.tell(Ping, listener.ref)
+ listener.expectMsg(Pong)
+ val routerSnapshot = collectMetricsOf("user/measuring-routing-time-in-group-router").get
+
+ routerSnapshot.histogram("routing-time").get.numberOfMeasurements should be(1L)
+ }
+
+ "record the processing-time of the receive function for pool routers" in new RouterMetricsFixtures {
+ val timingsListener = TestProbe()
+ val router = createTestPoolRouter("measuring-processing-time-in-pool-router")
+
+ router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
+ val timings = timingsListener.expectMsgType[RouterTrackedTimings]
+ val routerSnapshot = collectMetricsOf("user/measuring-processing-time-in-pool-router").get
+
+ routerSnapshot.histogram("processing-time").get.numberOfMeasurements should be(1L)
+ routerSnapshot.histogram("processing-time").get.recordsIterator.next().count should be(1L)
+ routerSnapshot.histogram("processing-time").get.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
+ }
+
+ "record the processing-time of the receive function for group routers" in new RouterMetricsFixtures {
+ val timingsListener = TestProbe()
+ val router = createTestGroupRouter("measuring-processing-time-in-group-router")
+
+ router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
+ val timings = timingsListener.expectMsgType[RouterTrackedTimings]
+ val routerSnapshot = collectMetricsOf("user/measuring-processing-time-in-group-router").get
+
+ routerSnapshot.histogram("processing-time").get.numberOfMeasurements should be(1L)
+ routerSnapshot.histogram("processing-time").get.recordsIterator.next().count should be(1L)
+ routerSnapshot.histogram("processing-time").get.recordsIterator.next().level should be(timings.approximateProcessingTime +- 10.millis.toNanos)
+ }
+
+ "record the number of errors for pool routers" in new RouterMetricsFixtures {
+ val listener = TestProbe()
+ val router = createTestPoolRouter("measuring-errors-in-pool-router")
+
+ for (i ← 1 to 10) {
+ router.tell(Fail, listener.ref)
+ }
+
+ router.tell(Ping, listener.ref)
+ listener.expectMsg(Pong)
+
+ val routerSnapshot = collectMetricsOf("user/measuring-errors-in-pool-router").get
+ routerSnapshot.counter("errors").get.count should be(10L)
+ }
+
+ "record the number of errors for group routers" in new RouterMetricsFixtures {
+ val listener = TestProbe()
+ val router = createTestGroupRouter("measuring-errors-in-group-router")
+
+ for (i ← 1 to 10) {
+ router.tell(Fail, listener.ref)
+ }
+
+ router.tell(Ping, listener.ref)
+ listener.expectMsg(Pong)
+
+ val routerSnapshot = collectMetricsOf("user/measuring-errors-in-group-router").get
+ routerSnapshot.counter("errors").get.count should be(10L)
+ }
+
+ "record the time-in-mailbox for pool routers" in new RouterMetricsFixtures {
+ val timingsListener = TestProbe()
+ val router = createTestPoolRouter("measuring-time-in-mailbox-in-pool-router")
+
+ router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
+ val timings = timingsListener.expectMsgType[RouterTrackedTimings]
+ val routerSnapshot = collectMetricsOf("user/measuring-time-in-mailbox-in-pool-router").get
+
+ routerSnapshot.histogram("time-in-mailbox").get.numberOfMeasurements should be(1L)
+ routerSnapshot.histogram("time-in-mailbox").get.recordsIterator.next().count should be(1L)
+ routerSnapshot.histogram("time-in-mailbox").get.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
+ }
+
+ "record the time-in-mailbox for group routers" in new RouterMetricsFixtures {
+ val timingsListener = TestProbe()
+ val router = createTestGroupRouter("measuring-time-in-mailbox-in-group-router")
+
+ router.tell(RouterTrackTimings(sleep = Some(1 second)), timingsListener.ref)
+ val timings = timingsListener.expectMsgType[RouterTrackedTimings]
+ val routerSnapshot = collectMetricsOf("user/measuring-time-in-mailbox-in-group-router").get
+
+ routerSnapshot.histogram("time-in-mailbox").get.numberOfMeasurements should be(1L)
+ routerSnapshot.histogram("time-in-mailbox").get.recordsIterator.next().count should be(1L)
+ routerSnapshot.histogram("time-in-mailbox").get.recordsIterator.next().level should be(timings.approximateTimeInMailbox +- 10.millis.toNanos)
+ }
+
+ "clean up the associated recorder when the pool router is stopped" in new RouterMetricsFixtures {
+ val trackedRouter = createTestPoolRouter("stop-in-pool-router")
+ val firstRecorder = routerMetricsRecorderOf("user/stop-in-pool-router").get
+
+ // Killing the router should remove its RouterMetrics, and registering it again below should create a new one.
+ val deathWatcher = TestProbe()
+ deathWatcher.watch(trackedRouter)
+ trackedRouter ! PoisonPill
+ deathWatcher.expectTerminated(trackedRouter)
+
+ routerMetricsRecorderOf("user/stop-in-pool-router").get shouldNot be theSameInstanceAs (firstRecorder)
+ }
+
+ "clean up the associated recorder when the group router is stopped" in new RouterMetricsFixtures {
+ val trackedRouter = createTestPoolRouter("stop-in-group-router")
+ val firstRecorder = routerMetricsRecorderOf("user/stop-in-group-router").get
+
+ // Killing the router should remove its RouterMetrics, and registering it again below should create a new one.
+ val deathWatcher = TestProbe()
+ deathWatcher.watch(trackedRouter)
+ trackedRouter ! PoisonPill
+ deathWatcher.expectTerminated(trackedRouter)
+
+ routerMetricsRecorderOf("user/stop-in-group-router").get shouldNot be theSameInstanceAs (firstRecorder)
+ }
+ }
+
+ override protected def afterAll(): Unit = shutdown()
+
+ trait RouterMetricsFixtures {
+ val collectionContext = new CollectionContext {
+ val buffer: LongBuffer = LongBuffer.allocate(10000)
+ }
+
+ def routerMetricsRecorderOf(routerName: String): Option[RouterMetrics] =
+ Kamon.metrics.register(RouterMetrics, routerName).map(_.recorder)
+
+ def collectMetricsOf(routerName: String): Option[EntitySnapshot] = {
+ Thread.sleep(5) // Just in case the test advances a bit faster than the actor being tested.
+ routerMetricsRecorderOf(routerName).map(_.collect(collectionContext))
+ }
+
+ def createTestGroupRouter(routerName: String): ActorRef = {
+ val routees = Vector.fill(5) {
+ system.actorOf(Props[RouterMetricsTestActor])
+ }
+
+ val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), routerName)
+
+ //val router = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), routerName)
+ val initialiseListener = TestProbe()
+
+ // Ensure that the router has been created before returning.
+ group.tell(Ping, initialiseListener.ref)
+ initialiseListener.expectMsg(Pong)
+
+ // Cleanup all the metric recording instruments:
+ collectMetricsOf("user/" + routerName)
+
+ group
+ }
+
+ def createTestPoolRouter(routerName: String): ActorRef = {
+ val router = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), routerName)
+ val initialiseListener = TestProbe()
+
+ // Ensure that the router has been created before returning.
+ router.tell(Ping, initialiseListener.ref)
+ initialiseListener.expectMsg(Pong)
+
+ // Cleanup all the metric recording instruments:
+ collectMetricsOf("user/" + routerName)
+
+ router
+ }
+ }
+}
+
+class RouterMetricsTestActor extends Actor {
+ def receive = {
+ case Discard ⇒
+ case Fail ⇒ throw new ArithmeticException("Division by zero.")
+ case Ping ⇒ sender ! Pong
+ case RouterTrackTimings(sendTimestamp, sleep) ⇒ {
+ val dequeueTimestamp = System.nanoTime()
+ sleep.map(s ⇒ Thread.sleep(s.toMillis))
+ val afterReceiveTimestamp = System.nanoTime()
+
+ sender ! RouterTrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp)
+ }
+ }
+}
+
+object RouterMetricsTestActor {
+ case object Ping
+ case object Pong
+ case object Fail
+ case object Discard
+
+ case class RouterTrackTimings(sendTimestamp: Long = System.nanoTime(), sleep: Option[Duration] = None)
+ case class RouterTrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) {
+ def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp
+ def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp
+ }
+}
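The fixtures above exercise both Akka router flavours: a pool router creates and supervises its own routees from Props, while a group router only routes to pre-existing actors identified by path. A minimal sketch of that distinction, reusing the RouterMetricsTestActor defined above (the system and router names are illustrative):

import akka.actor.{ ActorSystem, Props }
import akka.routing.{ RoundRobinGroup, RoundRobinPool }
import kamon.akka.RouterMetricsTestActor

object RouterFlavoursSketch extends App {
  val system = ActorSystem("router-flavours-sketch")

  // Pool router: the router itself instantiates five routees from the given Props.
  val pool = system.actorOf(RoundRobinPool(5).props(Props[RouterMetricsTestActor]), "sketch-pool-router")

  // Group router: the routees exist beforehand and the router only knows their paths.
  val routees = Vector.fill(5)(system.actorOf(Props[RouterMetricsTestActor]))
  val group = system.actorOf(RoundRobinGroup(routees.map(_.path.toStringWithoutAddress)).props(), "sketch-group-router")

  system.shutdown()
}
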
diff --git a/kamon-akka/src/test/scala/kamon/akka/instrumentation/AskPatternInstrumentationSpec.scala b/kamon-akka/src/test/scala/kamon/akka/instrumentation/AskPatternInstrumentationSpec.scala
index 0d63a19e..44b90642 100644
--- a/kamon-akka/src/test/scala/kamon/akka/instrumentation/AskPatternInstrumentationSpec.scala
+++ b/kamon-akka/src/test/scala/kamon/akka/instrumentation/AskPatternInstrumentationSpec.scala
@@ -114,7 +114,7 @@ class AskPatternInstrumentationSpec extends BaseKamonSpec("ask-pattern-tracing-s
}
def setAskPatternTimeoutWarningMode(mode: String): Unit = {
- val target = Kamon(Akka)(system)
+ val target = Kamon(Akka)
val field = target.getClass.getDeclaredField("askPatternTimeoutWarning")
field.setAccessible(true)
field.set(target, mode)
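
The hunk above changes how the Akka extension is looked up but keeps the same reflection trick: resolve a private field by name, make it accessible, and overwrite it for the duration of the test. A minimal sketch of that pattern on a stand-in class (everything here is illustrative, not Kamon's API):

object PrivateFieldSketch extends App {
  class Settings {
    private var askPatternTimeoutWarning: String = "off"
    def warningMode: String = askPatternTimeoutWarning
  }

  val target = new Settings
  val field = target.getClass.getDeclaredField("askPatternTimeoutWarning")
  field.setAccessible(true)         // bypass the private modifier
  field.set(target, "heavyweight")
  println(target.warningMode)       // prints "heavyweight"
}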