-rw-r--r--  .gitignore                                                                      |   1
-rw-r--r--  kamon-core/src/main/scala/kamon/instrumentation/ExecutorServiceMetrics.scala   | 162
-rw-r--r--  kamon-core/src/main/scala/kamon/instrumentation/MessageQueueMetrics.scala      |  77
-rw-r--r--  site/src/main/jekyll/_includes/navigation-bar.html                              |   1
-rw-r--r--  site/src/main/jekyll/acknowledgments.md                                         |  36
-rw-r--r--  site/src/main/jekyll/assets/img/diagrams/metric-collection-concepts.png         | bin 0 -> 131456 bytes
-rw-r--r--  site/src/main/jekyll/core/metrics.md                                            |  93
-rw-r--r--  site/src/main/jekyll/statsd/index.md                                            |  12
8 files changed, 143 insertions, 239 deletions
diff --git a/.gitignore b/.gitignore
index 1abb40f3..ead8dff7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
.history
*.sc
.pygments-cache
+.DS_Store
# sbt specific
dist/*
diff --git a/kamon-core/src/main/scala/kamon/instrumentation/ExecutorServiceMetrics.scala b/kamon-core/src/main/scala/kamon/instrumentation/ExecutorServiceMetrics.scala
deleted file mode 100644
index 90d2b270..00000000
--- a/kamon-core/src/main/scala/kamon/instrumentation/ExecutorServiceMetrics.scala
+++ /dev/null
@@ -1,162 +0,0 @@
-/* ===================================================
- * Copyright © 2013 the kamon project <http://kamon.io/>
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-package kamon.instrumentation
-
-import org.aspectj.lang.annotation._
-import java.util.concurrent._
-import org.aspectj.lang.ProceedingJoinPoint
-import java.util
-import akka.dispatch.{ MonitorableThreadFactory, ExecutorServiceFactory }
-import com.typesafe.config.Config
-import scala.concurrent.forkjoin.ForkJoinPool
-import akka.dispatch.ForkJoinExecutorConfigurator.AkkaForkJoinPool
-
-@Aspect
-class ActorSystemInstrumentation {
-
- @Pointcut("execution(akka.actor.ActorSystemImpl.new(..)) && args(name, applicationConfig, classLoader)")
- def actorSystemInstantiation(name: String, applicationConfig: Config, classLoader: ClassLoader) = {}
-
- @After("actorSystemInstantiation(name, applicationConfig, classLoader)")
- def registerActorSystem(name: String, applicationConfig: Config, classLoader: ClassLoader): Unit = {
-
- //Kamon.Metric.registerActorSystem(name)
- }
-}
-
-@Aspect("perthis(forkJoinPoolInstantiation(int, scala.concurrent.forkjoin.ForkJoinPool.ForkJoinWorkerThreadFactory, java.lang.Thread.UncaughtExceptionHandler))")
-class ForkJoinPoolInstrumentation {
- /* var activeThreadsHistogram: Histogram = _
- var poolSizeHistogram: Histogram = _*/
-
- @Pointcut("execution(akka.dispatch.ForkJoinExecutorConfigurator.AkkaForkJoinPool.new(..)) && args(parallelism, threadFactory, exceptionHandler)")
- def forkJoinPoolInstantiation(parallelism: Int, threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, exceptionHandler: Thread.UncaughtExceptionHandler) = {}
-
- @After("forkJoinPoolInstantiation(parallelism, threadFactory, exceptionHandler)")
- def initializeMetrics(parallelism: Int, threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, exceptionHandler: Thread.UncaughtExceptionHandler): Unit = {
- /*val (actorSystemName, dispatcherName) = threadFactory match {
- case mtf: MonitorableThreadFactory => splitName(mtf.name, Kamon.Metric.actorSystemNames)
- case _ => ("Unknown", "Unknown")
- }
-
- val metrics = Kamon.Metric.actorSystem(actorSystemName).get.registerDispatcher(dispatcherName)
- for(m <- metrics) {
- activeThreadsHistogram = m.activeThreadCount
- poolSizeHistogram = m.poolSize
- println(s"Registered $dispatcherName for actor system $actorSystemName")
- }*/
- }
-
- def splitName(threadFactoryName: String, knownActorSystems: List[String]): (String, String) = {
- knownActorSystems.find(threadFactoryName.startsWith(_)).map(asName ⇒ (asName, threadFactoryName.substring(asName.length + 1))).getOrElse(("Unkown", "Unkown"))
- }
-
- @Pointcut("execution(* scala.concurrent.forkjoin.ForkJoinPool.scan(..)) && this(fjp)")
- def forkJoinScan(fjp: AkkaForkJoinPool): Unit = {}
-
- @After("forkJoinScan(fjp)")
- def updateMetrics(fjp: AkkaForkJoinPool): Unit = {
- /*activeThreadsHistogram.update(fjp.getActiveThreadCount)
- poolSizeHistogram.update(fjp.getPoolSize)*/
- }
-
-}
-
-/**
- * ExecutorService monitoring base:
- */
-trait ExecutorServiceCollector {
- def updateActiveThreadCount(diff: Int): Unit
- def updateTotalThreadCount(diff: Int): Unit
- def updateQueueSize(diff: Int): Unit
-}
-
-trait WatchedExecutorService {
- def collector: ExecutorServiceCollector
-}
-
-/*
-trait ExecutorServiceMonitoring {
- def dispatcherMetrics: DispatcherMetricCollector
-}
-
-class ExecutorServiceMonitoringImpl extends ExecutorServiceMonitoring {
- @volatile var dispatcherMetrics: DispatcherMetricCollector = _
-}
-*/
-
-case class NamedExecutorServiceFactoryDelegate(actorSystemName: String, dispatcherName: String, delegate: ExecutorServiceFactory) extends ExecutorServiceFactory {
- def createExecutorService: ExecutorService = delegate.createExecutorService
-}
-
-@Aspect
-class ExecutorServiceFactoryProviderInstrumentation {
-
- @Pointcut("execution(* akka.dispatch.ExecutorServiceFactoryProvider+.createExecutorServiceFactory(..)) && args(dispatcherName, threadFactory) && if()")
- def factoryMethodCall(dispatcherName: String, threadFactory: ThreadFactory): Boolean = {
- true
- }
-
- @Around("factoryMethodCall(dispatcherName, threadFactory)")
- def enrichFactoryCreationWithNames(pjp: ProceedingJoinPoint, dispatcherName: String, threadFactory: ThreadFactory): ExecutorServiceFactory = {
- val delegate = pjp.proceed().asInstanceOf[ExecutorServiceFactory] // Safe Cast
-
- val actorSystemName = threadFactory match {
- case m: MonitorableThreadFactory ⇒ m.name
- case _ ⇒ "Unknown" // Find an alternative way to find the actor system name in case we start seeing "Unknown" as the AS name.
- }
-
- new NamedExecutorServiceFactoryDelegate(actorSystemName, dispatcherName, delegate)
- }
-
-}
-
-@Aspect
-class NamedExecutorServiceFactoryDelegateInstrumentation {
-
- @Pointcut("execution(* akka.dispatch.NamedExecutorServiceFactoryDelegate.createExecutorService()) && this(namedFactory)")
- def factoryMethodCall(namedFactory: NamedExecutorServiceFactoryDelegate) = {}
-
- @Around("factoryMethodCall(namedFactory)")
- def enrichExecutorServiceWithMetricNameRoot(pjp: ProceedingJoinPoint, namedFactory: NamedExecutorServiceFactoryDelegate): ExecutorService = {
- val delegate = pjp.proceed().asInstanceOf[ExecutorService]
- val executorFullName = "" //MetricDirectory.nameForDispatcher(namedFactory.actorSystemName, namedFactory.dispatcherName)
-
- //ExecutorServiceMetricCollector.register(executorFullName, delegate)
-
- new NamedExecutorServiceDelegate(executorFullName, delegate)
- }
-}
-
-case class NamedExecutorServiceDelegate(fullName: String, delegate: ExecutorService) extends ExecutorService {
- def shutdown() = {
- //ExecutorServiceMetricCollector.deregister(fullName)
- delegate.shutdown()
- }
- def shutdownNow(): util.List[Runnable] = delegate.shutdownNow()
- def isShutdown: Boolean = delegate.isShutdown
- def isTerminated: Boolean = delegate.isTerminated
- def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = delegate.awaitTermination(timeout, unit)
- def submit[T](task: Callable[T]): Future[T] = delegate.submit(task)
- def submit[T](task: Runnable, result: T): Future[T] = delegate.submit(task, result)
- def submit(task: Runnable): Future[_] = delegate.submit(task)
- def invokeAll[T](tasks: util.Collection[_ <: Callable[T]]): util.List[Future[T]] = delegate.invokeAll(tasks)
- def invokeAll[T](tasks: util.Collection[_ <: Callable[T]], timeout: Long, unit: TimeUnit): util.List[Future[T]] = delegate.invokeAll(tasks, timeout, unit)
- def invokeAny[T](tasks: util.Collection[_ <: Callable[T]]): T = delegate.invokeAny(tasks)
- def invokeAny[T](tasks: util.Collection[_ <: Callable[T]], timeout: Long, unit: TimeUnit): T = delegate.invokeAny(tasks, timeout, unit)
- def execute(command: Runnable) = delegate.execute(command)
-}
-
diff --git a/kamon-core/src/main/scala/kamon/instrumentation/MessageQueueMetrics.scala b/kamon-core/src/main/scala/kamon/instrumentation/MessageQueueMetrics.scala
deleted file mode 100644
index 44eb8c43..00000000
--- a/kamon-core/src/main/scala/kamon/instrumentation/MessageQueueMetrics.scala
+++ /dev/null
@@ -1,77 +0,0 @@
-/* ===================================================
- * Copyright © 2013 the kamon project <http://kamon.io/>
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-package kamon.instrumentation
-
-import akka.dispatch.{ UnboundedMessageQueueSemantics, Envelope, MessageQueue }
-import org.aspectj.lang.annotation.{ Around, Pointcut, DeclareMixin, Aspect }
-import akka.actor.{ ActorSystem, ActorRef }
-import org.aspectj.lang.ProceedingJoinPoint
-
-/**
- * For Mailboxes we would like to track the queue size and message latency. Currently the latency
- * will be gathered from the ActorCellMetrics.
- */
-/*
-
-@Aspect
-class MessageQueueInstrumentation {
-
- @Pointcut("execution(* akka.dispatch.MailboxType+.create(..)) && args(owner, system)")
- def messageQueueCreation(owner: Option[ActorRef], system: Option[ActorSystem]) = {}
-
- @Around("messageQueueCreation(owner, system)")
- def wrapMessageQueue(pjp: ProceedingJoinPoint, owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = {
- val delegate = pjp.proceed.asInstanceOf[MessageQueue]
-
- // We are not interested in monitoring mailboxes if we don't know where they belong to.
- val monitoredMailbox = for (own ← owner; sys ← system) yield {
- val systemName = sys.name
- val ownerName = MetricDirectory.nameForActor(own)
- val mailBoxName = MetricDirectory.nameForMailbox(systemName, ownerName)
-
- val queueSizeHistogram = new Histogram(new ExponentiallyDecayingReservoir())
- Metrics.include(mailBoxName, queueSizeHistogram)
-
- new MonitoredMessageQueue(delegate, queueSizeHistogram)
- }
-
- monitoredMailbox match {
- case None ⇒ delegate
- case Some(mmb) ⇒ mmb
- }
- }
-}
-
-class MonitoredMessageQueue(val delegate: MessageQueue, val queueSizeHistogram: Histogram) extends MessageQueue with UnboundedMessageQueueSemantics {
-
- def enqueue(receiver: ActorRef, handle: Envelope) = {
- delegate.enqueue(receiver, handle)
- //queueSizeHistogram.update(numberOfMessages)
- }
-
- def dequeue(): Envelope = {
- val envelope = delegate.dequeue()
- //queueSizeHistogram.update(numberOfMessages)
-
- envelope
- }
-
- def numberOfMessages: Int = delegate.numberOfMessages
- def hasMessages: Boolean = delegate.hasMessages
- def cleanUp(owner: ActorRef, deadLetters: MessageQueue) = delegate.cleanUp(owner, deadLetters)
-}
-*/
-
diff --git a/site/src/main/jekyll/_includes/navigation-bar.html b/site/src/main/jekyll/_includes/navigation-bar.html
index 4103de05..d104d058 100644
--- a/site/src/main/jekyll/_includes/navigation-bar.html
+++ b/site/src/main/jekyll/_includes/navigation-bar.html
@@ -37,6 +37,7 @@
<li><a tabindex="-1" href="https://groups.google.com/forum/#!forum/kamon-user"><i class="fa fa-google-plus"></i> Mailing List</a></li>
</ul>
</li>
+ <li><a href="/acknowledgments/">Acknowledgments</a></li>
<li><a href="/teamblog/">Team Blog</a></li>
</ul>
</div>
diff --git a/site/src/main/jekyll/acknowledgments.md b/site/src/main/jekyll/acknowledgments.md
new file mode 100644
index 00000000..f75f4e24
--- /dev/null
+++ b/site/src/main/jekyll/acknowledgments.md
@@ -0,0 +1,36 @@
+---
+title: Kamon | Acknowledgments
+layout: default
+---
+
+Acknowledgments
+===============
+
+We, the Kamon team, would like to express our gratitude to all the people and companies that help us make Kamon the best
+solution in the metrics collection space for Akka, Spray and Play! Let's give names and regards to these wonderful
+fellows:
+
+Our contributors
+----------------
+
+Everything starts with an idea, and [these](https://github.com/kamon-io/Kamon/graphs/contributors) guys are helping us
+take that idea and make it a reality: a reality that is helping developers around the world measure and monitor their
+success with reactive technologies. Kudos to all of you!
+
+
+Our users
+---------
+
+It is absolutely rewarding to know that Kamon is useful for people around the world, and it is even better when these
+people come to us looking for help, reporting issues, giving feedback or telling us how smoothly Kamon is monitoring
+their production systems. Thanks for using Kamon! Keep coming and spread the word :).
+
+
+
+[YourKit, LLC](http://www.yourkit.com)
+--------------------------------------
+
+We care a lot about performance and we try hard to keep Kamon's overhead as low as possible, but we couldn't succeed in
+this matter without [YourKit's Java Profiler](http://www.yourkit.com/java/profiler/index.jsp). It is well known to be
+one of the best profilers out there, and YourKit has been kind enough to support us by providing an open source license to
+Kamon developers. Thanks YourKit! We highly appreciate your support and commitment to the open source community.
diff --git a/site/src/main/jekyll/assets/img/diagrams/metric-collection-concepts.png b/site/src/main/jekyll/assets/img/diagrams/metric-collection-concepts.png
new file mode 100644
index 00000000..05f67710
--- /dev/null
+++ b/site/src/main/jekyll/assets/img/diagrams/metric-collection-concepts.png
Binary files differ
diff --git a/site/src/main/jekyll/core/metrics.md b/site/src/main/jekyll/core/metrics.md
new file mode 100644
index 00000000..dcef8304
--- /dev/null
+++ b/site/src/main/jekyll/core/metrics.md
@@ -0,0 +1,93 @@
+---
+title: Kamon | Core | Documentation
+layout: default
+---
+
+Metrics
+=======
+
+Some intro about metrics
+
+Philosophy
+----------
+
+Back in the day, the most common approach to get metrics out of an Akka/Spray application for production monitoring was
+manual instrumentation: select your favorite metrics collection library, wrap your messages with some useful
+metadata, wrap your actor's receive function with some metrics-measuring code and, finally, push that metrics data out
+to somewhere you can keep it, graph it and analyse it whenever you want.
+
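+As a rough illustration of what that manual approach looks like, here is a hand-instrumented actor that times its own
+message processing (purely illustrative names and counters, not tied to any particular metrics library):
+
+```scala
+import java.util.concurrent.atomic.AtomicLong
+import akka.actor.Actor
+
+class ManuallyInstrumentedActor extends Actor {
+  // Hand-rolled stand-ins for whatever metrics library was chosen; some reporting
+  // code elsewhere would read and publish these values.
+  private val totalProcessingTimeNanos = new AtomicLong(0L)
+  private val processedMessages        = new AtomicLong(0L)
+
+  def receive = {
+    case msg =>
+      val start = System.nanoTime()
+      try handleMessage(msg)
+      finally {
+        totalProcessingTimeNanos.addAndGet(System.nanoTime() - start)
+        processedMessages.incrementAndGet()
+      }
+  }
+
+  private def handleMessage(msg: Any): Unit = () // the real business logic would go here
+}
+```
+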
+Each metrics collection library has its own strengths and weaknesses, and each developer has to choose wisely according
+to the requirements at hand, leading them down different paths as they progress with their applications. Each
+path has different implications with regard to introduced overhead and latency, metrics data accuracy and memory
+consumption. Kamon takes this responsibility off the developer and tries to make the best choice, providing high
+performance metrics collection instruments while keeping the inherent overhead as low as possible.
+
+Kamon tries to select the best possible approach, so you don't have to.
+
+
+Metrics Collection and Flushing
+-------------------------------
+
+All the metrics infrastructure in Kamon was designed around two concepts: collection and flushing. Metrics collection
+happens in real time, as soon as the information is available to be recorded. Let's see a simple example: as soon as
+an actor finishes processing a message, Kamon knows the elapsed time for processing that specific message and records it
+right away. If you have millions of messages passing through your system, then millions of measurements will be
+taken.
+
+Flushing happens recurrently after a fixed amount of time has passed, a tick. Upon each tick, Kamon will collect all
+measurements recorded since the last tick, flush the collected data and reset all the instruments to zero. Let's explore
+a little bit more how these two concepts are modeled inside Kamon.
+
+<img class="img-responsive" src="/assets/img/diagrams/metric-collection-concepts.png">
+
+A metric group contains various individual metrics that are related to the same entity. For example, if the entity we
+are talking about is an actor, the metrics related to processing time, mailbox size and time in mailbox for that
+specific actor are grouped inside a single metric group, and each actor gets its own metric group. As you might gather
+from the diagram above, on the left we have the mutable side of the process, which is constantly recording measurements as
+the events flow through your application, and on the right we have the immutable side, containing snapshots representing
+all the measurements taken during a specific period of time for a metric group.
+
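+To make the collection and flushing sides a bit more concrete, here is a minimal sketch of the idea (illustrative
+classes only, not Kamon's actual API): a mutable recorder takes measurements in real time and, upon each tick, hands
+out an immutable snapshot of everything recorded so far while resetting itself back to zero.
+
+```scala
+import java.util.concurrent.atomic.AtomicLongArray
+
+// Immutable view of the measurements taken during one tick interval.
+final case class MetricSnapshot(counts: Vector[Long])
+
+// Mutable side: written on the hot path, every time a measurement becomes available.
+final class MetricRecorder(buckets: Int) {
+  private val counts = new AtomicLongArray(buckets)
+
+  def record(bucket: Int): Unit = counts.incrementAndGet(bucket)
+
+  // Flushing side: called on each tick, copies the data into a snapshot and resets everything to zero.
+  def collectAndReset(): MetricSnapshot =
+    MetricSnapshot(Vector.tabulate(buckets)(i => counts.getAndSet(i, 0L)))
+}
+```
+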
+
+Filtering Entities
+------------------
+
+By default Kamon will not include any entity for metrics collection and you will need to explicitly include all the
+entities you are interested in, be it an actor, a trace, a dispatcher or any other entity monitored by Kamon. The
+`kamon.metrics.filters` key in your application's configuration controls which entities must be included in or excluded from
+the metrics collection infrastructure. Includes and excludes are provided as lists of strings containing the
+corresponding GLOB patterns for each group, and the logic behind them is simple: include everything that matches at least
+one `includes` pattern and does not match any of the `excludes` patterns, as the sketch after the example below shows. The following configuration file sample includes
+the `user/job-manager` actor and all the worker actors, but leaves out all system actors and the `user/worker-helper`
+actor.
+
+```
+kamon {
+ metrics {
+ filters = [
+ {
+ actor {
+ includes = [ "user/job-manager", "user/worker-*" ]
+ excludes = [ "system/*", "user/worker-helper" ]
+ }
+ },
+ {
+ trace {
+ includes = [ "*" ]
+ excludes = []
+ }
+ }
+ ]
+ }
+}
+```
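+
+The matching rule itself is easy to express in code. A small sketch of the idea (illustrative only, not Kamon's actual
+implementation) shows how an entity name is tested against the two pattern lists:
+
+```scala
+object GlobFilter {
+  // Translate a GLOB pattern into a regular expression: everything is literal except "*".
+  private def globToRegex(glob: String) =
+    ("\\Q" + glob.replace("*", "\\E.*\\Q") + "\\E").r
+
+  // Accept an entity when it matches at least one `includes` pattern and none of the `excludes` patterns.
+  def accepts(includes: List[String], excludes: List[String], entityName: String): Boolean = {
+    def matches(pattern: String) = globToRegex(pattern).pattern.matcher(entityName).matches()
+    includes.exists(matches) && !excludes.exists(matches)
+  }
+}
+
+// With the configuration above: "user/worker-1" is accepted, "user/worker-helper" and "system/log1" are not.
+```
+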
+
+Instruments
+-----------
+
+Talk about how HDR Histogram works and how we use it.
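+
+Recording a value into an HdrHistogram and reading a percentile back looks roughly like this (plain `org.HdrHistogram`
+usage; the exact API varies slightly between library versions, and this is not how Kamon wires it internally):
+
+```scala
+import org.HdrHistogram.Histogram
+
+// Track values of up to one hour, expressed in nanoseconds, with 2 significant decimal digits of precision.
+val histogram = new Histogram(3600L * 1000 * 1000 * 1000, 2)
+
+histogram.recordValue(1500000L)                 // record a 1.5 millisecond measurement, in nanoseconds
+val p99 = histogram.getValueAtPercentile(99.0)  // read back the 99th percentile
+```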
+
+
+Subscription protocol
+---------------------
+
+Explain how to subscribe for metrics data and provide a simple example.
diff --git a/site/src/main/jekyll/statsd/index.md b/site/src/main/jekyll/statsd/index.md
new file mode 100644
index 00000000..ddc11960
--- /dev/null
+++ b/site/src/main/jekyll/statsd/index.md
@@ -0,0 +1,12 @@
+---
+title: Kamon | StatsD | Documentation
+layout: default
+---
+
+What is StatsD?
+===============
+
+StatsD is a simple network daemon that continuously receives metrics pushed over UDP and periodically sends aggregate metrics to upstream services
+like Graphite. Because it uses UDP, clients (for example, web applications) can ship metrics to it very fast with little to no overhead.
+This means that a user can capture multiple metrics for every request to a web application, even at a rate of thousands of requests per second.
+Request-level metrics are aggregated over a flush interval (default 10 seconds) and pushed to an upstream metrics service.
\ No newline at end of file
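+
+To give an idea of why the overhead is so low: a StatsD metric is just a small plain-text datagram of the form
+`<metric-name>:<value>|<type>`. Here is a minimal sketch of shipping a single counter increment by hand (illustrative
+only; the metric name below is made up, and Kamon's StatsD backend takes care of all of this for you):
+
+```scala
+import java.net.{ DatagramPacket, DatagramSocket, InetSocketAddress }
+
+val statsdAddress = new InetSocketAddress("localhost", 8125) // StatsD's default UDP port
+val socket        = new DatagramSocket()
+
+// Payload format is "metric-name:value|type"; here, a counter ("c") increment of 1.
+val payload = "kamon.example.processed-messages:1|c".getBytes("UTF-8")
+socket.send(new DatagramPacket(payload, payload.length, statsdAddress))
+socket.close()
+```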