aboutsummaryrefslogtreecommitdiff
path: root/kamon-core/src/legacy-main
diff options
context:
space:
mode:
Diffstat (limited to 'kamon-core/src/legacy-main')
-rw-r--r--kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java224
-rw-r--r--kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java191
-rw-r--r--kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java371
-rw-r--r--kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java110
-rw-r--r--kamon-core/src/legacy-main/resources/META-INF/aop.xml15
-rw-r--r--kamon-core/src/legacy-main/resources/reference.conf184
-rw-r--r--kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala25
-rw-r--r--kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala25
-rw-r--r--kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala25
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/Kamon.scala115
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala128
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala37
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala235
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala63
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala47
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala57
-rwxr-xr-xkamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala394
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala123
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala116
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala65
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala53
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala65
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala120
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala331
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala51
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala51
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala73
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala105
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala31
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala115
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala109
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala94
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala186
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala101
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala202
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala64
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala51
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala45
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala197
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala113
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala26
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala54
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala48
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala38
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala25
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala14
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/Latency.scala29
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala69
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala43
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala37
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala25
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala5
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala27
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala30
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala40
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala107
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala45
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala99
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala134
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala41
-rw-r--r--kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala49
61 files changed, 5692 insertions, 0 deletions
diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java b/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java
new file mode 100644
index 00000000..7e47ae63
--- /dev/null
+++ b/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java
@@ -0,0 +1,224 @@
+/*
+
+Note: this was copied from Doug Lea's CVS repository
+ http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/
+
+LongAdder.java version 1.14
+
+*/
+
+
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package kamon.jsr166;
+
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain an initially zero
+ * {@code long} sum. When updates (method {@link #add}) are contended
+ * across threads, the set of variables may grow dynamically to reduce
+ * contention. Method {@link #sum} (or, equivalently, {@link
+ * #longValue}) returns the current total combined across the
+ * variables maintaining the sum.
+ *
+ * <p>This class is usually preferable to {@link java.util.concurrent.atomic.AtomicLong} when
+ * multiple threads update a common sum that is used for purposes such
+ * as collecting statistics, not for fine-grained synchronization
+ * control. Under low update contention, the two classes have similar
+ * characteristics. But under high contention, expected throughput of
+ * this class is significantly higher, at the expense of higher space
+ * consumption.
+ *
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class LongAdder extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+
+ /**
+ * Version of plus for use in retryUpdate
+ */
+ final long fn(long v, long x) { return v + x; }
+
+ /**
+ * Creates a new adder with initial sum of zero.
+ */
+ public LongAdder() {
+ }
+
+ /**
+ * Adds the given value.
+ *
+ * @param x the value to add
+ */
+ public void add(long x) {
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null || !casBase(b = base, b + x)) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ !(uncontended = a.cas(v = a.value, v + x)))
+ retryUpdate(x, hc, uncontended);
+ }
+ }
+
+ /**
+ * Equivalent to {@code add(1)}.
+ */
+ public void increment() {
+ add(1L);
+ }
+
+ /**
+ * Equivalent to {@code add(-1)}.
+ */
+ public void decrement() {
+ add(-1L);
+ }
+
+ /**
+ * Returns the current sum. The returned value is <em>NOT</em> an
+ * atomic snapshot; invocation in the absence of concurrent
+ * updates returns an accurate result, but concurrent updates that
+ * occur while the sum is being calculated might not be
+ * incorporated.
+ *
+ * @return the sum
+ */
+ public long sum() {
+ long sum = base;
+ Cell[] as = cells;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null)
+ sum += a.value;
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Resets variables maintaining the sum to zero. This method may
+ * be a useful alternative to creating a new adder, but is only
+ * effective if there are no concurrent updates. Because this
+ * method is intrinsically racy, it should only be used when it is
+ * known that no threads are concurrently updating.
+ */
+ public void reset() {
+ internalReset(0L);
+ }
+
+ /**
+ * Equivalent in effect to {@link #sum} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the sum
+ */
+ public long sumThenReset() {
+ long sum = base;
+ Cell[] as = cells;
+ base = 0L;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ sum += a.value;
+ a.value = 0L;
+ }
+ }
+ }
+ return sum;
+ }
+
+ public long sumAndReset() {
+ long sum = getAndSetBase(0L);
+ Cell[] as = cells;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ sum += a.getAndSet(0L);
+ }
+ }
+ }
+ return sum;
+ }
+
+ /**
+ * Returns the String representation of the {@link #sum}.
+ * @return the String representation of the {@link #sum}
+ */
+ public String toString() {
+ return Long.toString(sum());
+ }
+
+ /**
+ * Equivalent to {@link #sum}.
+ *
+ * @return the sum
+ */
+ public long longValue() {
+ return sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as an {@code int} after a narrowing
+ * primitive conversion.
+ */
+ public int intValue() {
+ return (int)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code float}
+ * after a widening primitive conversion.
+ */
+ public float floatValue() {
+ return (float)sum();
+ }
+
+ /**
+ * Returns the {@link #sum} as a {@code double} after a widening
+ * primitive conversion.
+ */
+ public double doubleValue() {
+ return (double)sum();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeLong(sum());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = s.readLong();
+ }
+
+}
diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java b/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java
new file mode 100644
index 00000000..fc9ea4e5
--- /dev/null
+++ b/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java
@@ -0,0 +1,191 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package kamon.jsr166;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain a running {@code long}
+ * maximum with initial value {@code Long.MIN_VALUE}. When updates
+ * (method {@link #update}) are contended across threads, the set of
+ * variables may grow dynamically to reduce contention. Method {@link
+ * #max} (or, equivalently, {@link #longValue}) returns the current
+ * maximum across the variables maintaining updates.
+ *
+ * <p>This class extends {@link Number}, but does <em>not</em> define
+ * methods such as {@code equals}, {@code hashCode} and {@code
+ * compareTo} because instances are expected to be mutated, and so are
+ * not useful as collection keys.
+ *
+ * <p><em>jsr166e note: This class is targeted to be placed in
+ * java.util.concurrent.atomic.</em>
+ *
+ * @since 1.8
+ * @author Doug Lea
+ */
+public class LongMaxUpdater extends Striped64 implements Serializable {
+ private static final long serialVersionUID = 7249069246863182397L;
+
+ /**
+ * Version of max for use in retryUpdate
+ */
+ final long fn(long v, long x) { return v > x ? v : x; }
+
+ /**
+ * Creates a new instance with initial maximum of {@code
+ * Long.MIN_VALUE}.
+ */
+ public LongMaxUpdater() {
+ base = Long.MIN_VALUE;
+ }
+
+ /**
+ * Creates a new instance with the given initialValue
+ */
+ public LongMaxUpdater(long initialValue) {
+ base = initialValue;
+ }
+
+
+ /**
+ * Updates the maximum to be at least the given value.
+ *
+ * @param x the value to update
+ */
+ public void update(long x) {
+ Cell[] as; long b, v; HashCode hc; Cell a; int n;
+ if ((as = cells) != null ||
+ (b = base) < x && !casBase(b, x)) {
+ boolean uncontended = true;
+ int h = (hc = threadHashCode.get()).code;
+ if (as == null || (n = as.length) < 1 ||
+ (a = as[(n - 1) & h]) == null ||
+ ((v = a.value) < x && !(uncontended = a.cas(v, x))))
+ retryUpdate(x, hc, uncontended);
+ }
+ }
+
+ /**
+ * Returns the current maximum. The returned value is
+ * <em>NOT</em> an atomic snapshot; invocation in the absence of
+ * concurrent updates returns an accurate result, but concurrent
+ * updates that occur while the value is being calculated might
+ * not be incorporated.
+ *
+ * @return the maximum
+ */
+ public long max() {
+ Cell[] as = cells;
+ long max = base;
+ if (as != null) {
+ int n = as.length;
+ long v;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null && (v = a.value) > max)
+ max = v;
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Resets variables maintaining updates to {@code Long.MIN_VALUE}.
+ * This method may be a useful alternative to creating a new
+ * updater, but is only effective if there are no concurrent
+ * updates. Because this method is intrinsically racy, it should
+ * only be used when it is known that no threads are concurrently
+ * updating.
+ */
+ public void reset() {
+ internalReset(Long.MIN_VALUE);
+ }
+
+ /**
+ * Equivalent in effect to {@link #max} followed by {@link
+ * #reset}. This method may apply for example during quiescent
+ * points between multithreaded computations. If there are
+ * updates concurrent with this method, the returned value is
+ * <em>not</em> guaranteed to be the final value occurring before
+ * the reset.
+ *
+ * @return the maximum
+ */
+ public long maxThenReset(long newValue) {
+ Cell[] as = cells;
+ long max = base;
+ base = newValue;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null) {
+ long v = a.value;
+ a.value = newValue;
+ if (v > max)
+ max = v;
+ }
+ }
+ }
+ return max;
+ }
+
+ /**
+ * Returns the String representation of the {@link #max}.
+ * @return the String representation of the {@link #max}
+ */
+ public String toString() {
+ return Long.toString(max());
+ }
+
+ /**
+ * Equivalent to {@link #max}.
+ *
+ * @return the maximum
+ */
+ public long longValue() {
+ return max();
+ }
+
+ /**
+ * Returns the {@link #max} as an {@code int} after a narrowing
+ * primitive conversion.
+ */
+ public int intValue() {
+ return (int)max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code float}
+ * after a widening primitive conversion.
+ */
+ public float floatValue() {
+ return (float)max();
+ }
+
+ /**
+ * Returns the {@link #max} as a {@code double} after a widening
+ * primitive conversion.
+ */
+ public double doubleValue() {
+ return (double)max();
+ }
+
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
+ s.defaultWriteObject();
+ s.writeLong(max());
+ }
+
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+ busy = 0;
+ cells = null;
+ base = s.readLong();
+ }
+
+}
diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java b/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java
new file mode 100644
index 00000000..8fbfa4ba
--- /dev/null
+++ b/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java
@@ -0,0 +1,371 @@
+/*
+
+Note: this was copied from Doug Lea's CVS repository
+ http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/
+
+Striped64.java version 1.8
+
+*/
+
+
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package kamon.jsr166;
+
+import java.util.Random;
+
+/**
+ * A package-local class holding common representation and mechanics
+ * for classes supporting dynamic striping on 64bit values. The class
+ * extends Number so that concrete subclasses must publicly do so.
+ */
+abstract class Striped64 extends Number {
+ /*
+ * This class maintains a lazily-initialized table of atomically
+ * updated variables, plus an extra "base" field. The table size
+ * is a power of two. Indexing uses masked per-thread hash codes.
+ * Nearly all declarations in this class are package-private,
+ * accessed directly by subclasses.
+ *
+ * Table entries are of class Cell; a variant of AtomicLong padded
+ * to reduce cache contention on most processors. Padding is
+ * overkill for most Atomics because they are usually irregularly
+ * scattered in memory and thus don't interfere much with each
+ * other. But Atomic objects residing in arrays will tend to be
+ * placed adjacent to each other, and so will most often share
+ * cache lines (with a huge negative performance impact) without
+ * this precaution.
+ *
+ * In part because Cells are relatively large, we avoid creating
+ * them until they are needed. When there is no contention, all
+ * updates are made to the base field. Upon first contention (a
+ * failed CAS on base update), the table is initialized to size 2.
+ * The table size is doubled upon further contention until
+ * reaching the nearest power of two greater than or equal to the
+ * number of CPUS. Table slots remain empty (null) until they are
+ * needed.
+ *
+ * A single spinlock ("busy") is used for initializing and
+ * resizing the table, as well as populating slots with new Cells.
+ * There is no need for a blocking lock; when the lock is not
+ * available, threads try other slots (or the base). During these
+ * retries, there is increased contention and reduced locality,
+ * which is still better than alternatives.
+ *
+ * Per-thread hash codes are initialized to random values.
+ * Contention and/or table collisions are indicated by failed
+ * CASes when performing an update operation (see method
+ * retryUpdate). Upon a collision, if the table size is less than
+ * the capacity, it is doubled in size unless some other thread
+ * holds the lock. If a hashed slot is empty, and lock is
+ * available, a new Cell is created. Otherwise, if the slot
+ * exists, a CAS is tried. Retries proceed by "double hashing",
+ * using a secondary hash (Marsaglia XorShift) to try to find a
+ * free slot.
+ *
+ * The table size is capped because, when there are more threads
+ * than CPUs, supposing that each thread were bound to a CPU,
+ * there would exist a perfect hash function mapping threads to
+ * slots that eliminates collisions. When we reach capacity, we
+ * search for this mapping by randomly varying the hash codes of
+ * colliding threads. Because search is random, and collisions
+ * only become known via CAS failures, convergence can be slow,
+ * and because threads are typically not bound to CPUS forever,
+ * may not occur at all. However, despite these limitations,
+ * observed contention rates are typically low in these cases.
+ *
+ * It is possible for a Cell to become unused when threads that
+ * once hashed to it terminate, as well as in the case where
+ * doubling the table causes no thread to hash to it under
+ * expanded mask. We do not try to detect or remove such cells,
+ * under the assumption that for long-running instances, observed
+ * contention levels will recur, so the cells will eventually be
+ * needed again; and for short-lived ones, it does not matter.
+ */
+
+ /**
+ * Padded variant of AtomicLong supporting only raw accesses plus CAS.
+ * The value field is placed between pads, hoping that the JVM doesn't
+ * reorder them.
+ *
+ * JVM intrinsics note: It would be possible to use a release-only
+ * form of CAS here, if it were provided.
+ */
+ static final class Cell {
+ volatile long p0, p1, p2, p3, p4, p5, p6;
+ volatile long value;
+ volatile long q0, q1, q2, q3, q4, q5, q6;
+ Cell(long x) { value = x; }
+
+ final boolean cas(long cmp, long val) {
+ return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val);
+ }
+
+ final long getAndSet(long val) {
+ long v;
+ do {
+ v = UNSAFE.getLongVolatile(this, valueOffset);
+ } while (!UNSAFE.compareAndSwapLong(this, valueOffset, v, val));
+ return v;
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long valueOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> ak = Cell.class;
+ valueOffset = UNSAFE.objectFieldOffset
+ (ak.getDeclaredField("value"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ }
+
+ /**
+ * Holder for the thread-local hash code. The code is initially
+ * random, but may be set to a different value upon collisions.
+ */
+ static final class HashCode {
+ static final Random rng = new Random();
+ int code;
+ HashCode() {
+ int h = rng.nextInt(); // Avoid zero to allow xorShift rehash
+ code = (h == 0) ? 1 : h;
+ }
+ }
+
+ /**
+ * The corresponding ThreadLocal class
+ */
+ static final class ThreadHashCode extends ThreadLocal<HashCode> {
+ public HashCode initialValue() { return new HashCode(); }
+ }
+
+ /**
+ * Static per-thread hash codes. Shared across all instances to
+ * reduce ThreadLocal pollution and because adjustments due to
+ * collisions in one table are likely to be appropriate for
+ * others.
+ */
+ static final ThreadHashCode threadHashCode = new ThreadHashCode();
+
+ /** Number of CPUS, to place bound on table size */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /**
+ * Table of cells. When non-null, size is a power of 2.
+ */
+ transient volatile Cell[] cells;
+
+ /**
+ * Base value, used mainly when there is no contention, but also as
+ * a fallback during table initialization races. Updated via CAS.
+ */
+ transient volatile long base;
+
+ /**
+ * Spinlock (locked via CAS) used when resizing and/or creating Cells.
+ */
+ transient volatile int busy;
+
+ /**
+ * Package-private default constructor
+ */
+ Striped64() {
+ }
+
+ /**
+ * CASes the base field.
+ */
+ final boolean casBase(long cmp, long val) {
+ return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val);
+ }
+
+ /**
+ * CASes the base field.
+ */
+ final long getAndSetBase(long val) {
+ long v;
+ do {
+ v = UNSAFE.getLongVolatile(this, baseOffset);
+ } while (!UNSAFE.compareAndSwapLong(this, baseOffset, v, val));
+ return v;
+ }
+
+ /**
+ * CASes the busy field from 0 to 1 to acquire lock.
+ */
+ final boolean casBusy() {
+ return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1);
+ }
+
+ /**
+ * Computes the function of current and new value. Subclasses
+ * should open-code this update function for most uses, but the
+ * virtualized form is needed within retryUpdate.
+ *
+ * @param currentValue the current value (of either base or a cell)
+ * @param newValue the argument from a user update call
+ * @return result of the update function
+ */
+ abstract long fn(long currentValue, long newValue);
+
+ /**
+ * Handles cases of updates involving initialization, resizing,
+ * creating new Cells, and/or contention. See above for
+ * explanation. This method suffers the usual non-modularity
+ * problems of optimistic retry code, relying on rechecked sets of
+ * reads.
+ *
+ * @param x the value
+ * @param hc the hash code holder
+ * @param wasUncontended false if CAS failed before call
+ */
+ final void retryUpdate(long x, HashCode hc, boolean wasUncontended) {
+ int h = hc.code;
+ boolean collide = false; // True if last slot nonempty
+ for (;;) {
+ Cell[] as; Cell a; int n; long v;
+ if ((as = cells) != null && (n = as.length) > 0) {
+ if ((a = as[(n - 1) & h]) == null) {
+ if (busy == 0) { // Try to attach new Cell
+ Cell r = new Cell(x); // Optimistically create
+ if (busy == 0 && casBusy()) {
+ boolean created = false;
+ try { // Recheck under lock
+ Cell[] rs; int m, j;
+ if ((rs = cells) != null &&
+ (m = rs.length) > 0 &&
+ rs[j = (m - 1) & h] == null) {
+ rs[j] = r;
+ created = true;
+ }
+ } finally {
+ busy = 0;
+ }
+ if (created)
+ break;
+ continue; // Slot is now non-empty
+ }
+ }
+ collide = false;
+ }
+ else if (!wasUncontended) // CAS already known to fail
+ wasUncontended = true; // Continue after rehash
+ else if (a.cas(v = a.value, fn(v, x)))
+ break;
+ else if (n >= NCPU || cells != as)
+ collide = false; // At max size or stale
+ else if (!collide)
+ collide = true;
+ else if (busy == 0 && casBusy()) {
+ try {
+ if (cells == as) { // Expand table unless stale
+ Cell[] rs = new Cell[n << 1];
+ for (int i = 0; i < n; ++i)
+ rs[i] = as[i];
+ cells = rs;
+ }
+ } finally {
+ busy = 0;
+ }
+ collide = false;
+ continue; // Retry with expanded table
+ }
+ h ^= h << 13; // Rehash
+ h ^= h >>> 17;
+ h ^= h << 5;
+ }
+ else if (busy == 0 && cells == as && casBusy()) {
+ boolean init = false;
+ try { // Initialize table
+ if (cells == as) {
+ Cell[] rs = new Cell[2];
+ rs[h & 1] = new Cell(x);
+ cells = rs;
+ init = true;
+ }
+ } finally {
+ busy = 0;
+ }
+ if (init)
+ break;
+ }
+ else if (casBase(v = base, fn(v, x)))
+ break; // Fall back on using base
+ }
+ hc.code = h; // Record index for next time
+ }
+
+
+ /**
+ * Sets base and all cells to the given value.
+ */
+ final void internalReset(long initialValue) {
+ Cell[] as = cells;
+ base = initialValue;
+ if (as != null) {
+ int n = as.length;
+ for (int i = 0; i < n; ++i) {
+ Cell a = as[i];
+ if (a != null)
+ a.value = initialValue;
+ }
+ }
+ }
+
+ // Unsafe mechanics
+ private static final sun.misc.Unsafe UNSAFE;
+ private static final long baseOffset;
+ private static final long busyOffset;
+ static {
+ try {
+ UNSAFE = getUnsafe();
+ Class<?> sk = Striped64.class;
+ baseOffset = UNSAFE.objectFieldOffset
+ (sk.getDeclaredField("base"));
+ busyOffset = UNSAFE.objectFieldOffset
+ (sk.getDeclaredField("busy"));
+ } catch (Exception e) {
+ throw new Error(e);
+ }
+ }
+
+ /**
+ * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
+ * Replace with a simple call to Unsafe.getUnsafe when integrating
+ * into a jdk.
+ *
+ * @return a sun.misc.Unsafe
+ */
+ private static sun.misc.Unsafe getUnsafe() {
+ try {
+ return sun.misc.Unsafe.getUnsafe();
+ } catch (SecurityException tryReflectionInstead) {}
+ try {
+ return java.security.AccessController.doPrivileged
+ (new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
+ public sun.misc.Unsafe run() throws Exception {
+ Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
+ for (java.lang.reflect.Field f : k.getDeclaredFields()) {
+ f.setAccessible(true);
+ Object x = f.get(null);
+ if (k.isInstance(x))
+ return k.cast(x);
+ }
+ throw new NoSuchFieldError("the Unsafe");
+ }});
+ } catch (java.security.PrivilegedActionException e) {
+ throw new RuntimeException("Could not initialize intrinsics",
+ e.getCause());
+ }
+ }
+}
diff --git a/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java b/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java
new file mode 100644
index 00000000..ac5d08a6
--- /dev/null
+++ b/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java
@@ -0,0 +1,110 @@
+/*
+ * =========================================================================================
+ * Copyright 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util;
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Default implementation of PathFilter. Uses glob based includes and excludes to determine whether to export.
+ *
+ * @author John E. Bailey
+ * @author <a href="mailto:david.lloyd@redhat.com">David M. Lloyd</a>
+ */
+public final class GlobPathFilter implements PathFilter {
+ private static final Pattern GLOB_PATTERN = Pattern.compile("(\\*\\*?)|(\\?)|(\\\\.)|(/+)|([^*?]+)");
+
+ private final String glob;
+ private final Pattern pattern;
+
+ /**
+ * Construct a new instance.
+ *
+ * @param glob the path glob to match
+ */
+ public GlobPathFilter(final String glob) {
+ pattern = getGlobPattern(glob);
+ this.glob = glob;
+ }
+
+ /**
+ * Determine whether a path should be accepted.
+ *
+ * @param path the path to check
+ * @return true if the path should be accepted, false if not
+ */
+ public boolean accept(final String path) {
+ return pattern.matcher(path).matches();
+ }
+
+ /**
+ * Get a regular expression pattern which accept any path names which match the given glob. The glob patterns
+ * function similarly to {@code ant} file patterns. Valid metacharacters in the glob pattern include:
+ * <ul>
+ * <li><code>"\"</code> - escape the next character (treat it literally, even if it is itself a recognized metacharacter)</li>
+ * <li><code>"?"</code> - match any non-slash character</li>
+ * <li><code>"*"</code> - match zero or more non-slash characters</li>
+ * <li><code>"**"</code> - match zero or more characters, including slashes</li>
+ * <li><code>"/"</code> - match one or more slash characters. Consecutive {@code /} characters are collapsed down into one.</li>
+ * </ul>
+ * In addition, any glob pattern matches all subdirectories thereof. A glob pattern ending in {@code /} is equivalent
+ * to a glob pattern ending in <code>/**</code> in that the named directory is not itself included in the glob.
+ * <p/>
+ * <b>See also:</b> <a href="http://ant.apache.org/manual/dirtasks.html#patterns">"Patterns" in the Ant Manual</a>
+ *
+ * @param glob the glob to match
+ *
+ * @return the pattern
+ */
+ private static Pattern getGlobPattern(final String glob) {
+ StringBuilder patternBuilder = new StringBuilder();
+ final Matcher m = GLOB_PATTERN.matcher(glob);
+ boolean lastWasSlash = false;
+ while (m.find()) {
+ lastWasSlash = false;
+ String grp;
+ if ((grp = m.group(1)) != null) {
+ // match a * or **
+ if (grp.length() == 2) {
+ // it's a "**"
+ patternBuilder.append(".*");
+ } else {
+ // it's a *
+ patternBuilder.append("[^/]*");
+ }
+ } else if ((grp = m.group(2)) != null) {
+ // match a '?' glob pattern; any non-slash character
+ patternBuilder.append("[^/]");
+ } else if ((grp = m.group(3)) != null) {
+ // backslash-escaped value
+ patternBuilder.append(Pattern.quote(m.group().substring(1)));
+ } else if ((grp = m.group(4)) != null) {
+ // match any number of / chars
+ patternBuilder.append("/+");
+ lastWasSlash = true;
+ } else {
+ // some other string
+ patternBuilder.append(Pattern.quote(m.group()));
+ }
+ }
+ if (lastWasSlash) {
+ // ends in /, append **
+ patternBuilder.append(".*");
+ }
+ return Pattern.compile(patternBuilder.toString());
+ }
+}
diff --git a/kamon-core/src/legacy-main/resources/META-INF/aop.xml b/kamon-core/src/legacy-main/resources/META-INF/aop.xml
new file mode 100644
index 00000000..b13f9aac
--- /dev/null
+++ b/kamon-core/src/legacy-main/resources/META-INF/aop.xml
@@ -0,0 +1,15 @@
+<!DOCTYPE aspectj PUBLIC "-//AspectJ//DTD//EN" "http://www.eclipse.org/aspectj/dtd/aspectj.dtd">
+
+<aspectj>
+ <aspects>
+
+ <!-- Notify that AspectJ is present -->
+ <aspect name="kamon.AspectJPresent"/>
+
+ </aspects>
+
+ <weaver>
+ <include within="kamon..*"/>
+ </weaver>
+
+</aspectj> \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/resources/reference.conf b/kamon-core/src/legacy-main/resources/reference.conf
new file mode 100644
index 00000000..48441493
--- /dev/null
+++ b/kamon-core/src/legacy-main/resources/reference.conf
@@ -0,0 +1,184 @@
+# ================================== #
+# Kamon-Core Reference Configuration #
+# ================================== #
+
+kamon {
+ metric {
+
+ # Time interval for collecting all metrics and send the snapshots to all subscribed actors.
+ tick-interval = 10 seconds
+
+ # Default size for the LongBuffer that gets allocated for metrics collection and merge. The
+ # value should correspond to the highest number of different buckets with values that might
+ # exist in a single histogram during a metrics collection. The default value of 33792 is a
+    # very conservative value and it's equal to the total number of buckets required to cover values
+ # from 1 nanosecond to 1 hour with 0.1% precision (3 significant value digits). That means
+    # that you would need to have at least one measurement on every bucket of a single histogram to
+ # fully utilize this buffer, which is *really* unlikely to ever happen. Since the buffer should
+ # be allocated once and reused it shouldn't impose a memory footprint issue.
+ default-collection-context-buffer-size = 33792
+
+ # Disables a big error message that will be typically logged if your application wasn't started
+ # with the -javaagent:/path-to-aspectj-weaver.jar option. If you are only using KamonStandalone
+ # it might be ok for you to turn this error off.
+ disable-aspectj-weaver-missing-error = false
+
+ # Specify if entities that do not match any include/exclude filter should be tracked.
+ track-unmatched-entities = yes
+
+ filters {
+ trace {
+ includes = [ "**" ]
+ excludes = [ ]
+ }
+ }
+
+    # Default instrument settings for histograms, min max counters and gauges. The actual settings to be used when
+ # creating a instrument is determined by merging the default settings, code settings and specific instrument
+ # settings using the following priorities (top wins):
+
+ # - any setting in `kamon.metric.instrument-settings` for the given category/instrument.
+ # - code settings provided when creating the instrument.
+ # - `default-instrument-settings`.
+ #
+ default-instrument-settings {
+ histogram {
+ precision = normal
+ lowest-discernible-value = 1
+ highest-trackable-value = 3600000000000
+ }
+
+ min-max-counter {
+ precision = normal
+ lowest-discernible-value = 1
+ highest-trackable-value = 999999999
+ refresh-interval = 100 milliseconds
+ }
+
+ gauge {
+ precision = normal
+ lowest-discernible-value = 1
+ highest-trackable-value = 3600000000000
+ refresh-interval = 100 milliseconds
+ }
+
+ }
+
+ # Custom configurations for category instruments. The settings provided in this section will override the default
+ # and code instrument settings as explained in the `default-instrument-settings` key. There is no need to provide
+    # full instrument settings in this section, only the settings that should be overridden must be included. Example:
+ # if you wish to change the precision and lowest discernible value of the `elapsed-time` instrument for the `trace`
+ # category, you should include the following configuration in your application.conf file:
+ #
+ # kamon.metric.instrument-settings.trace {
+ # elapsed-time {
+ # precision = fine
+ # lowest-discernible-value = 1000
+ # }
+ # }
+ #
+ # In this example, the value for the `highest-trackable-value` setting will be either the code setting or the default
+ # setting, depending on how the `elapsed-time` metric is created.
+ instrument-settings {
+
+ }
+ }
+
+
+ trace {
+
+ # Level of detail used when recording trace information. The possible values are:
+ # - metrics-only: metrics for all included traces and all segments are recorded, but no Trace messages will be sent
+ # to the subscribers of trace data.
+ # - simple-trace: metrics for all included traces and all segments are recorded and additionally a Trace message
+    #     containing the trace and segments details and metadata will be sent to the subscribers.
+ level-of-detail = metrics-only
+
+ # Sampling strategy to apply when the tracing level is set to `simple-trace`. The options are: all, random, ordered,
+ # threshold and clock. The details of each sampler are below.
+ sampling = random
+
+    # Use a ThreadLocalRandom to generate numbers between 1 and 100; if the random number is less than or equal to .chance
+ # then tracing information will be gathered and reported for the current trace.
+ random-sampler {
+ chance = 10
+ }
+
+    # Use an AtomicLong to ensure that every .sample-interval number of requests tracing information will be gathered and
+ # reported.
+ ordered-sampler {
+ # must be power of two
+ sample-interval = 8
+ }
+
+ # Fully qualified name of the function that will be used for generating tokens to traces.
+ token-generator = kamon.trace.DefaultTokenGenerator
+
+    # Gather tracing information for all traces but only report those whose elapsed-time is equal to or greater than the
+ # .minimum-elapsed-time setting.
+ threshold-sampler {
+ minimum-elapsed-time = 1 second
+ }
+
+ # Use a FiniteDuration to only record a trace each .pause nanoseconds.
+ clock-sampler {
+ pause = 1 second
+ }
+
+ incubator {
+ # Minimum time to stay in the trace incubator before checking if the trace should not be incubated anymore. No
+ # checks are made at least until this period has passed.
+ min-incubation-time = 5 seconds
+
+ # Time to wait between incubation checks. After min-incubation-time, a trace is checked using this interval and if
+      # it shouldn't be incubated anymore, the TraceInfo is collected and reported for it.
+ check-interval = 1 second
+
+ # Max amount of time that a trace can be in the incubator. If this time is reached for a given trace then it will
+ # be reported with whatever information is available at the moment, logging a warning for each segment that remains
+ # open after this point.
+ max-incubation-time = 20 seconds
+ }
+ }
+
+  # All settings included under the internal-config key will be used to replace the akka.* and spray.* settings. By
+ # doing this we avoid applying custom settings that might make sense for the user application to the internal actor
+ # system and Spray facilities used by Kamon.
+ internal-config {
+
+ akka.actor {
+ provider = "local"
+ default-dispatcher {
+ fork-join-executor {
+ parallelism-min = 2
+ parallelism-factor = 2.0
+ parallelism-max = 10
+ }
+ }
+ }
+
+ spray {
+
+ }
+ }
+
+ # Controls whether the AspectJ Weaver missing warning should be displayed if any Kamon module requiring AspectJ is
+ # found in the classpath but the application is started without the AspectJ Weaver.
+ show-aspectj-missing-warning = yes
+
+ modules {
+ # Just a place holder to ensure that the key is always available. Non-core Kamon modules should provide their
+ # settings in a module-info section.
+ }
+
+  # Add tags to all reported metrics. Can be useful to identify the source of metrics from a particular JVM instance.
+ # Example:
+ #
+ # default-tags {
+ # host: ${?HOSTNAME}
+ # container-name: ${?CONTAINER_NAME}
+ # }
+ default-tags {
+
+ }
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala
new file mode 100644
index 00000000..01dd4234
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala
@@ -0,0 +1,25 @@
+/* =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon
+
+import scala.util.control.NonFatal
+
+import akka.actor.ActorSystem
+
+object ActorSystemTools {
+ private[kamon] def terminateActorSystem(system: ActorSystem): Unit = {
+ system.shutdown()
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala
new file mode 100644
index 00000000..01dd4234
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala
@@ -0,0 +1,25 @@
+/* =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon
+
+import scala.util.control.NonFatal
+
+import akka.actor.ActorSystem
+
+object ActorSystemTools {
+ private[kamon] def terminateActorSystem(system: ActorSystem): Unit = {
+ system.shutdown()
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala
new file mode 100644
index 00000000..762201d5
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala
@@ -0,0 +1,25 @@
+/* =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon
+
+import akka.actor.ActorSystem
+
+import scala.util.control.NonFatal
+
+object ActorSystemTools {
+ private[kamon] def terminateActorSystem(system: ActorSystem): Unit = {
+ system.terminate()
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/Kamon.scala b/kamon-core/src/legacy-main/scala/kamon/Kamon.scala
new file mode 100644
index 00000000..c02d0505
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/Kamon.scala
@@ -0,0 +1,115 @@
+/* =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon
+
+import _root_.akka.actor
+import _root_.akka.actor._
+import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions, ConfigResolveOptions}
+import kamon.metric._
+import kamon.trace.TracerModuleImpl
+import kamon.util.logger.LazyLogger
+
+import _root_.scala.util.control.NonFatal
+import _root_.scala.util.{Failure, Success, Try}
+
+object Kamon {
+ trait Extension extends actor.Extension
+
+ @volatile private var kamonInstance = new Instance()
+
+ def config = kamonInstance.config
+ def metrics = kamonInstance.metrics
+ def tracer = kamonInstance.tracer
+
+ def start(): Unit = synchronized {
+ kamonInstance.start()
+ }
+
+ def start(conf: Config): Unit = synchronized {
+ kamonInstance.start(conf)
+ }
+
+ def shutdown(): Unit = synchronized {
+ kamonInstance.shutdown()
+ kamonInstance = new Instance()
+ }
+
+ private class Instance() {
+ private val log = LazyLogger(classOf[Instance])
+ private var actorSystem: ActorSystem = _
+
+ var started = false
+ var config: Config = defaultConfig
+ val metrics = MetricsModuleImpl(config)
+ val tracer = TracerModuleImpl(metrics, config)
+
+ private lazy val _start = {
+ log.info("Initializing Kamon...")
+ tryLoadAutoweaveModule()
+ actorSystem = ActorSystem("kamon", config)
+ metrics.start(actorSystem, config)
+ tracer.start(actorSystem, config)
+ actorSystem.registerExtension(ModuleLoader)
+ started = true
+ }
+
+ def start(): Unit = {
+ _start
+ }
+
+ def start(conf: Config): Unit = {
+ config = patchConfiguration(conf)
+ _start
+ }
+
+ def shutdown(): Unit = {
+ if (started) {
+ ActorSystemTools.terminateActorSystem(actorSystem)
+ }
+ }
+
+ private def defaultConfig = {
+ patchConfiguration(
+ ConfigFactory.load(
+ Thread.currentThread().getContextClassLoader(),
+ ConfigParseOptions.defaults(),
+ ConfigResolveOptions.defaults().setAllowUnresolved(true)
+ )
+ )
+ }
+
+ private def patchConfiguration(config: Config): Config = {
+ val internalConfig = config.getConfig("kamon.internal-config")
+ config
+ .withoutPath("akka")
+ .withoutPath("spray")
+ .withFallback(internalConfig)
+ }
+
+ private def tryLoadAutoweaveModule(): Unit = {
+ Try {
+ val autoweave = Class.forName("kamon.autoweave.Autoweave")
+ autoweave.getDeclaredMethod("attach").invoke(autoweave.newInstance())
+ } match {
+ case Success(_) ⇒
+ val color = (msg: String) ⇒ s"""\u001B[32m${msg}\u001B[0m"""
+ log.info(color("Kamon-autoweave has been successfully loaded."))
+ log.info(color("The AspectJ load time weaving agent is now attached to the JVM (you don't need to use -javaagent)."))
+ log.info(color("This offers extra flexibility but obviously any classes loaded before attachment will not be woven."))
+ case Failure(NonFatal(reason)) ⇒ log.debug(s"Kamon-autoweave failed to load. Reason: ${reason.getMessage}.")
+ }
+ }
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala b/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala
new file mode 100644
index 00000000..f1b5f414
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala
@@ -0,0 +1,128 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon
+
+import _root_.akka.actor
+import _root_.akka.actor._
+import kamon.util.logger.LazyLogger
+import org.aspectj.lang.ProceedingJoinPoint
+import org.aspectj.lang.annotation.{Around, Aspect, Pointcut}
+
+private[kamon] object ModuleLoader extends ExtensionId[ModuleLoaderExtension] with ExtensionIdProvider {
+ def lookup(): ExtensionId[_ <: actor.Extension] = ModuleLoader
+ def createExtension(system: ExtendedActorSystem): ModuleLoaderExtension = new ModuleLoaderExtension(system)
+}
+
+private[kamon] class ModuleLoaderExtension(system: ExtendedActorSystem) extends Kamon.Extension {
+ val log = LazyLogger(getClass)
+ val settings = ModuleLoaderSettings(system)
+
+ if (settings.modulesRequiringAspectJ.nonEmpty && !isAspectJPresent && settings.showAspectJMissingWarning)
+ logAspectJWeaverMissing(settings.modulesRequiringAspectJ)
+
+ // Force initialization of all modules marked with auto-start.
+ settings.availableModules.filter(_.startInfo.nonEmpty).foreach {
+ case AvailableModuleInfo(name, requiresAJ, Some(ModuleStartInfo(autoStart, extensionClass))) if autoStart ⇒
+
+ system.dynamicAccess.getObjectFor[ExtensionId[Kamon.Extension]](extensionClass).map { moduleID ⇒
+ log.debug(s"Auto starting the [$name] module.")
+ moduleID.get(system)
+
+ } recover {
+ case th: Throwable ⇒ log.error(s"Failed to auto start the [$name] module.", th)
+ }
+
+ case other ⇒
+
+ }
+
+ // When AspectJ is present the kamon.supervisor.AspectJPresent aspect will make this return true.
+ def isAspectJPresent: Boolean = false
+
+ def logAspectJWeaverMissing(modulesRequiringAspectJ: List[AvailableModuleInfo]): Unit = {
+ val moduleNames = modulesRequiringAspectJ.map(_.name).mkString(", ")
+ val weaverMissingMessage =
+ """
+ |
+ | ___ _ ___ _ _ ___ ___ _ _
+ | / _ \ | | |_ | | | | | | \/ |(_) (_)
+ |/ /_\ \ ___ _ __ ___ ___ | |_ | | | | | | ___ __ _ __ __ ___ _ __ | . . | _ ___ ___ _ _ __ __ _
+ || _ |/ __|| '_ \ / _ \ / __|| __| | | | |/\| | / _ \ / _` |\ \ / // _ \| '__| | |\/| || |/ __|/ __|| || '_ \ / _` |
+ || | | |\__ \| |_) || __/| (__ | |_ /\__/ / \ /\ /| __/| (_| | \ V /| __/| | | | | || |\__ \\__ \| || | | || (_| |
+ |\_| |_/|___/| .__/ \___| \___| \__|\____/ \/ \/ \___| \__,_| \_/ \___||_| \_| |_/|_||___/|___/|_||_| |_| \__, |
+ | | | __/ |
+ | |_| |___/
+ |
+ | It seems like your application was not started with the -javaagent:/path-to-aspectj-weaver.jar option but Kamon detected
+ | the following modules which require AspectJ to work properly:
+ |
+ """.stripMargin + moduleNames +
+ """
+ |
+ | If you need help on setting up the aspectj weaver go to http://kamon.io/introduction/get-started/ for more info. On the
+ | other hand, if you are sure that you do not need or do not want to use the weaver then you can disable this error message
+ | by changing the kamon.show-aspectj-missing-warning setting in your configuration file.
+ |
+ """.stripMargin
+
+ log.error(weaverMissingMessage)
+ }
+}
+
+private[kamon] case class AvailableModuleInfo(name: String, requiresAspectJ: Boolean, startInfo: Option[ModuleStartInfo])
+private[kamon] case class ModuleStartInfo(autoStart: Boolean, extensionClass: String)
+private[kamon] case class ModuleLoaderSettings(showAspectJMissingWarning: Boolean, availableModules: List[AvailableModuleInfo]) {
+ val modulesRequiringAspectJ = availableModules.filter(_.requiresAspectJ)
+}
+
+private[kamon] object ModuleLoaderSettings {
+
+ def apply(system: ActorSystem): ModuleLoaderSettings = {
+ import kamon.util.ConfigTools.Syntax
+
+ val config = system.settings.config.getConfig("kamon.modules")
+ val showAspectJMissingWarning = system.settings.config.getBoolean("kamon.show-aspectj-missing-warning")
+
+ val modules = config.firstLevelKeys
+ val availableModules = modules.map { moduleName ⇒
+ val moduleConfig = config.getConfig(moduleName)
+ val requiresAspectJ = moduleConfig.getBoolean("requires-aspectj")
+
+ val startInfo =
+ if (moduleConfig.hasPath("auto-start") && moduleConfig.hasPath("extension-class"))
+ Some(ModuleStartInfo(moduleConfig.getBoolean("auto-start"), moduleConfig.getString("extension-class")))
+ else
+ None
+
+ AvailableModuleInfo(moduleName, requiresAspectJ, startInfo)
+
+ } toList
+
+ ModuleLoaderSettings(showAspectJMissingWarning, availableModules)
+ }
+}
+
+@Aspect
+private[kamon] class AspectJPresent {
+
+ @Pointcut("execution(* kamon.ModuleLoaderExtension.isAspectJPresent())")
+ def isAspectJPresentAtModuleSupervisor(): Unit = {}
+
+ @Around("isAspectJPresentAtModuleSupervisor()")
+ def aroundIsAspectJPresentAtModuleSupervisor(pjp: ProceedingJoinPoint): Boolean = true
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala b/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala
new file mode 100644
index 00000000..91249af0
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala
@@ -0,0 +1,37 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+/**
+ * Identify a `thing` that is being monitored by Kamon. A [[kamon.metric.Entity]] is used to identify tracked `things`
+ * in both the metrics recording and reporting sides. Only the name and category fields are used with determining
+ * equality between two entities.
+ *
+ * // TODO: Find a better word for `thing`.
+ */
+case class Entity(name: String, category: String, tags: Map[String, String])
+
+object Entity {
+ def apply(name: String, category: String): Entity =
+ apply(name, category, Map.empty)
+
+ def create(name: String, category: String): Entity =
+ apply(name, category, Map.empty)
+
+ def create(name: String, category: String, tags: Map[String, String]): Entity =
+ new Entity(name, category, tags)
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala b/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala
new file mode 100644
index 00000000..e3b136dd
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala
@@ -0,0 +1,235 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.metric.instrument.Gauge.CurrentValueCollector
+import kamon.metric.instrument.Histogram.DynamicRange
+import kamon.metric.instrument._
+import kamon.util.Function
+
+import scala.collection.concurrent.TrieMap
+import scala.concurrent.duration.FiniteDuration
+
+trait EntityRecorder {
+ def collect(collectionContext: CollectionContext): EntitySnapshot
+ def cleanup: Unit
+}
+
+trait EntityRecorderFactory[T <: EntityRecorder] {
+ def category: String
+ def createRecorder(instrumentFactory: InstrumentFactory): T
+}
+
+abstract class EntityRecorderFactoryCompanion[T <: EntityRecorder](val category: String, builder: (InstrumentFactory) ⇒ T)
+ extends EntityRecorderFactory[T] {
+
+ def createRecorder(instrumentFactory: InstrumentFactory): T = builder(instrumentFactory)
+}
+
+object EntityRecorderFactory {
+ def apply[T <: EntityRecorder](entityCategory: String, factory: InstrumentFactory ⇒ T): EntityRecorderFactory[T] =
+ new EntityRecorderFactory[T] {
+ def category: String = entityCategory
+ def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory)
+ }
+
+ def create[T <: EntityRecorder](entityCategory: String, factory: Function[InstrumentFactory, T]): EntityRecorderFactory[T] =
+ new EntityRecorderFactory[T] {
+ def category: String = entityCategory
+ def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory)
+ }
+}
+
+private[kamon] sealed trait SingleInstrumentEntityRecorder extends EntityRecorder {
+ def key: MetricKey
+ def instrument: Instrument
+
+ def collect(collectionContext: CollectionContext): EntitySnapshot =
+ new DefaultEntitySnapshot(Map(key → instrument.collect(collectionContext)))
+
+ def cleanup: Unit = instrument.cleanup
+}
+
+object SingleInstrumentEntityRecorder {
+ val Histogram = "histogram"
+ val MinMaxCounter = "min-max-counter"
+ val Gauge = "gauge"
+ val Counter = "counter"
+
+ val AllCategories = List("histogram", "gauge", "counter", "min-max-counter")
+}
+
+/**
+ * Entity recorder for a single Counter instrument.
+ */
+case class CounterRecorder(key: MetricKey, instrument: Counter) extends SingleInstrumentEntityRecorder
+
+/**
+ * Entity recorder for a single Histogram instrument.
+ */
+case class HistogramRecorder(key: MetricKey, instrument: Histogram) extends SingleInstrumentEntityRecorder
+
+/**
+ * Entity recorder for a single MinMaxCounter instrument.
+ */
+case class MinMaxCounterRecorder(key: MetricKey, instrument: MinMaxCounter) extends SingleInstrumentEntityRecorder
+
+/**
+ * Entity recorder for a single Gauge instrument.
+ */
+case class GaugeRecorder(key: MetricKey, instrument: Gauge) extends SingleInstrumentEntityRecorder
+
+/**
+ * Base class with plenty of utility methods to facilitate the creation of [[EntityRecorder]] implementations.
+ * It is not required to use this base class for defining a custom [[EntityRecorder]], but it is certainly
+ * the most convenient way to do it and the preferred approach throughout the Kamon codebase.
+ */
+abstract class GenericEntityRecorder(instrumentFactory: InstrumentFactory) extends EntityRecorder {
+ import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax
+
+ private val _instruments = TrieMap.empty[MetricKey, Instrument]
+ private def register[T <: Instrument](key: MetricKey, instrument: ⇒ T): T =
+ _instruments.atomicGetOrElseUpdate(key, instrument, _.cleanup).asInstanceOf[T]
+
+ protected def histogram(name: String): Histogram =
+ register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name))
+
+ protected def histogram(name: String, dynamicRange: DynamicRange): Histogram =
+ register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name, Some(dynamicRange)))
+
+ protected def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram =
+ register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name))
+
+ protected def histogram(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): Histogram =
+ register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name, Some(dynamicRange)))
+
+ protected def removeHistogram(name: String): Unit =
+ _instruments.remove(HistogramKey(name, UnitOfMeasurement.Unknown))
+
+ protected def removeHistogram(name: String, unitOfMeasurement: UnitOfMeasurement): Unit =
+ _instruments.remove(HistogramKey(name, unitOfMeasurement))
+
+ protected def minMaxCounter(name: String): MinMaxCounter =
+ register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name))
+
+ protected def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter =
+ register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange)))
+
+ protected def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter =
+ register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval)))
+
+ protected def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name))
+
+ protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter =
+ register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval)))
+
+ protected def minMaxCounter(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange)))
+
+ protected def minMaxCounter(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval)))
+
+ protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval)))
+
+ protected def minMaxCounter(key: MinMaxCounterKey): MinMaxCounter =
+ register(key, instrumentFactory.createMinMaxCounter(key.name))
+
+ protected def minMaxCounter(key: MinMaxCounterKey, dynamicRange: DynamicRange): MinMaxCounter =
+ register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange)))
+
+ protected def minMaxCounter(key: MinMaxCounterKey, refreshInterval: FiniteDuration): MinMaxCounter =
+ register(key, instrumentFactory.createMinMaxCounter(key.name, refreshInterval = Some(refreshInterval)))
+
+ protected def minMaxCounter(key: MinMaxCounterKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter =
+ register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange), Some(refreshInterval)))
+
+ protected def removeMinMaxCounter(name: String): Unit =
+ _instruments.remove(MinMaxCounterKey(name, UnitOfMeasurement.Unknown))
+
+ protected def removeMinMaxCounter(key: MinMaxCounterKey): Unit =
+ _instruments.remove(key)
+
+ protected def gauge(name: String, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, valueCollector = valueCollector))
+
+ protected def gauge(name: String, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector))
+
+ protected def gauge(name: String, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector))
+
+ protected def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, valueCollector = valueCollector))
+
+ protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector))
+
+ protected def gauge(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector))
+
+ protected def gauge(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector))
+
+ protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge =
+ register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector))
+
+ protected def gauge(key: GaugeKey, valueCollector: CurrentValueCollector): Gauge =
+ register(key, instrumentFactory.createGauge(key.name, valueCollector = valueCollector))
+
+ protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge =
+ register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), valueCollector = valueCollector))
+
+ protected def gauge(key: GaugeKey, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge =
+ register(key, instrumentFactory.createGauge(key.name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector))
+
+ protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge =
+ register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector))
+
+ protected def removeGauge(name: String): Unit =
+ _instruments.remove(GaugeKey(name, UnitOfMeasurement.Unknown))
+
+ protected def removeGauge(key: GaugeKey): Unit =
+ _instruments.remove(key)
+
+ protected def counter(name: String): Counter =
+ register(CounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createCounter())
+
+ protected def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter =
+ register(CounterKey(name, unitOfMeasurement), instrumentFactory.createCounter())
+
+ protected def counter(key: CounterKey): Counter =
+ register(key, instrumentFactory.createCounter())
+
+ protected def removeCounter(name: String): Unit =
+ _instruments.remove(CounterKey(name, UnitOfMeasurement.Unknown))
+
+ protected def removeCounter(key: CounterKey): Unit =
+ _instruments.remove(key)
+
+ def collect(collectionContext: CollectionContext): EntitySnapshot = {
+ val snapshots = Map.newBuilder[MetricKey, InstrumentSnapshot]
+ _instruments.foreach {
+ case (key, instrument) ⇒ snapshots += key → instrument.collect(collectionContext)
+ }
+
+ new DefaultEntitySnapshot(snapshots.result())
+ }
+
+ def cleanup: Unit = _instruments.values.foreach(_.cleanup)
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala b/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala
new file mode 100644
index 00000000..16edecd8
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala
@@ -0,0 +1,63 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.metric.instrument.{Counter, Histogram, CollectionContext, InstrumentSnapshot}
+import kamon.util.MapMerge
+import scala.reflect.ClassTag
+
+trait EntitySnapshot {
+ def metrics: Map[MetricKey, InstrumentSnapshot]
+ def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot
+
+ def histogram(name: String): Option[Histogram.Snapshot] =
+ find[HistogramKey, Histogram.Snapshot](name)
+
+ def minMaxCounter(name: String): Option[Histogram.Snapshot] =
+ find[MinMaxCounterKey, Histogram.Snapshot](name)
+
+ def gauge(name: String): Option[Histogram.Snapshot] =
+ find[GaugeKey, Histogram.Snapshot](name)
+
+ def counter(name: String): Option[Counter.Snapshot] =
+ find[CounterKey, Counter.Snapshot](name)
+
+ def histograms: Map[HistogramKey, Histogram.Snapshot] =
+ filterByType[HistogramKey, Histogram.Snapshot]
+
+ def minMaxCounters: Map[MinMaxCounterKey, Histogram.Snapshot] =
+ filterByType[MinMaxCounterKey, Histogram.Snapshot]
+
+ def gauges: Map[GaugeKey, Histogram.Snapshot] =
+ filterByType[GaugeKey, Histogram.Snapshot]
+
+ def counters: Map[CounterKey, Counter.Snapshot] =
+ filterByType[CounterKey, Counter.Snapshot]
+
+ private def filterByType[K <: MetricKey, V <: InstrumentSnapshot](implicit keyCT: ClassTag[K]): Map[K, V] =
+ metrics.collect { case (k, v) if keyCT.runtimeClass.isInstance(k) ⇒ (k.asInstanceOf[K], v.asInstanceOf[V]) }
+
+ private def find[K <: MetricKey, V <: InstrumentSnapshot](name: String)(implicit keyCT: ClassTag[K]) =
+ metrics.find { case (k, v) ⇒ keyCT.runtimeClass.isInstance(k) && k.name == name } map (_._2.asInstanceOf[V])
+}
+
+class DefaultEntitySnapshot(val metrics: Map[MetricKey, InstrumentSnapshot]) extends EntitySnapshot {
+ import MapMerge.Syntax
+
+ override def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot =
+ new DefaultEntitySnapshot(metrics.merge(that.metrics, (l, r) ⇒ l.merge(r, collectionContext)))
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala
new file mode 100644
index 00000000..0d4e0163
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala
@@ -0,0 +1,47 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.metric.instrument.UnitOfMeasurement
+
+/**
+ * MetricKeys are used to identify a given metric in entity recorders and snapshots.
+ */
+sealed trait MetricKey {
+ def name: String
+ def unitOfMeasurement: UnitOfMeasurement
+}
+
+/**
+ * MetricKey for all Histogram-based metrics.
+ */
+private[kamon] case class HistogramKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all MinMaxCounter-based metrics.
+ */
+private[kamon] case class MinMaxCounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all Gauge-based metrics.
+ */
+private[kamon] case class GaugeKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all Counter-based metrics.
+ */
+private[kamon] case class CounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala
new file mode 100644
index 00000000..06de65ef
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala
@@ -0,0 +1,57 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.actor.{Actor, ActorRef, Props}
+import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
+import kamon.metric.instrument._
+
+/**
+ * Can be used as a decorator to scale TickMetricSnapshot messages to given `timeUnits` and/or `memoryUnits`
+ * before forwarding to original receiver
+ * @param timeUnits Optional time units to scale time metrics to
+ * @param memoryUnits Optional memory units to scale memory metrics to
+ * @param receiver Receiver of scaled metrics snapshot, usually a backend sender
+ */
+class MetricScaleDecorator(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef) extends Actor {
+ require(
+ timeUnits.isDefined || memoryUnits.isDefined,
+ "Use MetricScaleDecorator only when any of units is defined"
+ )
+
+ override def receive: Receive = {
+ case tick: TickMetricSnapshot ⇒
+ val scaled = tick.copy(metrics = tick.metrics.mapValues { entitySnapshot ⇒
+ new DefaultEntitySnapshot(entitySnapshot.metrics.map {
+ case (metricKey, metricSnapshot) ⇒
+ val scaledSnapshot = (metricKey.unitOfMeasurement, timeUnits, memoryUnits) match {
+ case (time: Time, Some(to), _) ⇒ metricSnapshot.scale(time, to)
+ case (memory: Memory, _, Some(to)) ⇒ metricSnapshot.scale(memory, to)
+ case _ ⇒ metricSnapshot
+ }
+ metricKey → scaledSnapshot
+ })
+ })
+ receiver forward scaled
+ }
+}
+
+object MetricScaleDecorator {
+ def props(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef): Props =
+ Props(new MetricScaleDecorator(timeUnits, memoryUnits, receiver))
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala
new file mode 100755
index 00000000..7c85bb02
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala
@@ -0,0 +1,394 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import java.util.Map.Entry
+
+import akka.actor._
+import com.typesafe.config.{Config, ConfigValue, ConfigValueType}
+import kamon.metric.SubscriptionsDispatcher.{Subscribe, Unsubscribe}
+import kamon.metric.instrument.Gauge.CurrentValueCollector
+import kamon.metric.instrument.Histogram.DynamicRange
+import kamon.metric.instrument._
+import kamon.util.LazyActorRef
+
+import scala.collection.JavaConverters._
+import scala.collection.concurrent.TrieMap
+import scala.concurrent.duration.FiniteDuration
+
+case class EntityRegistration[T <: EntityRecorder](entity: Entity, recorder: T)
+
+trait MetricsModule {
+ def settings: MetricsSettings
+
+ def shouldTrack(entity: Entity): Boolean
+
+ def shouldTrack(entityName: String, category: String): Boolean =
+ shouldTrack(Entity(entityName, category))
+
+ //
+ // Histograms registration and removal
+ //
+
+ def histogram(name: String): Histogram =
+ registerHistogram(name)
+
+ def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram =
+ registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement))
+
+ def histogram(name: String, dynamicRange: DynamicRange): Histogram =
+ registerHistogram(name, dynamicRange = Some(dynamicRange))
+
+ def histogram(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram =
+ registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange))
+
+ def histogram(name: String, tags: Map[String, String]): Histogram =
+ registerHistogram(name, tags)
+
+ def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Histogram =
+ registerHistogram(name, tags, Some(unitOfMeasurement))
+
+ def histogram(name: String, tags: Map[String, String], dynamicRange: DynamicRange): Histogram =
+ registerHistogram(name, tags, dynamicRange = Some(dynamicRange))
+
+ def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram =
+ registerHistogram(name, tags, Some(unitOfMeasurement), Some(dynamicRange))
+
+ def removeHistogram(name: String): Boolean =
+ removeHistogram(name, Map.empty)
+
+ def registerHistogram(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None,
+ dynamicRange: Option[DynamicRange] = None): Histogram
+
+ def removeHistogram(name: String, tags: Map[String, String]): Boolean
+
+ //
+ // MinMaxCounter registration and removal
+ //
+
+ def minMaxCounter(name: String): MinMaxCounter =
+ registerMinMaxCounter(name)
+
+ def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ registerMinMaxCounter(name, unitOfMeasurement = Some(unitOfMeasurement))
+
+ def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter =
+ registerMinMaxCounter(name, dynamicRange = Some(dynamicRange))
+
+ def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter =
+ registerMinMaxCounter(name, refreshInterval = Some(refreshInterval))
+
+ def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter =
+ registerMinMaxCounter(name, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval))
+
+ def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter =
+ registerMinMaxCounter(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange))
+
+ def minMaxCounter(name: String, tags: Map[String, String]): MinMaxCounter =
+ registerMinMaxCounter(name, tags)
+
+ def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): MinMaxCounter =
+ registerMinMaxCounter(name, tags, Some(unitOfMeasurement))
+
+ def minMaxCounter(name: String, tags: Map[String, String], dynamicRange: DynamicRange): MinMaxCounter =
+ registerMinMaxCounter(name, tags, dynamicRange = Some(dynamicRange))
+
+ def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter =
+ registerMinMaxCounter(name, tags, Some(unitOfMeasurement), Some(dynamicRange))
+
+ def removeMinMaxCounter(name: String): Boolean =
+ removeMinMaxCounter(name, Map.empty)
+
+ def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean
+
+ def registerMinMaxCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None,
+ dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter
+
+ //
+ // Gauge registration and removal
+ //
+
+ def gauge(name: String)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector)
+
+ def gauge(name: String, unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement))
+
+ def gauge(name: String, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange))
+
+ def gauge(name: String, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, refreshInterval = Some(refreshInterval))
+
+ def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval))
+
+ def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange))
+
+ def gauge(name: String, tags: Map[String, String])(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, tags)
+
+ def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, tags, Some(unitOfMeasurement))
+
+ def gauge(name: String, tags: Map[String, String], dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, tags, dynamicRange = Some(dynamicRange))
+
+ def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge =
+ registerGauge(name, valueCollector, tags, Some(unitOfMeasurement), Some(dynamicRange))
+
+ def removeGauge(name: String): Boolean =
+ removeGauge(name, Map.empty)
+
+ def removeGauge(name: String, tags: Map[String, String]): Boolean
+
+ def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty,
+ unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None,
+ refreshInterval: Option[FiniteDuration] = None): Gauge
+
+ //
+ // Counters registration and removal
+ //
+
+ def counter(name: String): Counter =
+ registerCounter(name)
+
+ def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter =
+ registerCounter(name, unitOfMeasurement = Some(unitOfMeasurement))
+
+ def counter(name: String, tags: Map[String, String]): Counter =
+ registerCounter(name, tags)
+
+ def counter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Counter =
+ registerCounter(name, tags, Some(unitOfMeasurement))
+
+ def removeCounter(name: String): Boolean =
+ removeCounter(name, Map.empty)
+
+ def removeCounter(name: String, tags: Map[String, String]): Boolean
+
+ def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None,
+ dynamicRange: Option[DynamicRange] = None): Counter
+
+ //
+ // Entities registration and removal
+ //
+
+ def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String): T =
+ entity(recorderFactory, Entity(name, recorderFactory.category))
+
+ def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String, tags: Map[String, String]): T =
+ entity(recorderFactory, Entity(name, recorderFactory.category, tags))
+
+ def removeEntity(name: String, category: String): Boolean =
+ removeEntity(Entity(name, category, Map.empty))
+
+ def removeEntity(name: String, category: String, tags: Map[String, String]): Boolean =
+ removeEntity(Entity(name, category, tags))
+
+ def removeEntity(entity: Entity): Boolean
+
+ def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T
+
+ def find(name: String, category: String): Option[EntityRecorder] =
+ find(Entity(name, category))
+
+ def find(name: String, category: String, tags: Map[String, String]): Option[EntityRecorder] =
+ find(Entity(name, category, tags))
+
+ def find(entity: Entity): Option[EntityRecorder]
+
+ def subscribe(filter: SubscriptionFilter, subscriber: ActorRef): Unit =
+ subscribe(filter, subscriber, permanently = true)
+
+ def subscribe(category: String, selection: String, subscriber: ActorRef, permanently: Boolean): Unit =
+ subscribe(SubscriptionFilter(category, selection), subscriber, permanently)
+
+ def subscribe(category: String, selection: String, subscriber: ActorRef): Unit =
+ subscribe(SubscriptionFilter(category, selection), subscriber, permanently = true)
+
+ def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean): Unit
+
+ def unsubscribe(subscriber: ActorRef): Unit
+
+ def buildDefaultCollectionContext: CollectionContext
+
+ def instrumentFactory(category: String): InstrumentFactory
+}
+
+private[kamon] class MetricsModuleImpl(config: Config) extends MetricsModule {
+ import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax
+
+ private val _trackedEntities = TrieMap.empty[Entity, EntityRecorder]
+ private val _subscriptions = new LazyActorRef
+
+ @volatile var settings = MetricsSettings(config)
+
+ val defaultTags: Map[String, String] = if (config.hasPath("kamon.default-tags")) {
+ config.getConfig("kamon.default-tags").resolve().entrySet().asScala
+ .collect {
+ case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.STRING =>
+ (e.getKey, e.getValue.unwrapped().asInstanceOf[String])
+ case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.NUMBER =>
+ (e.getKey, e.getValue.unwrapped().asInstanceOf[Int].toString)
+ case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.BOOLEAN =>
+ (e.getKey, e.getValue.unwrapped().asInstanceOf[Boolean].toString)
+ }.toMap
+ }
+ else {
+ Map.empty
+ }
+
+ def shouldTrack(entity: Entity): Boolean =
+ settings.entityFilters.get(entity.category).map {
+ filter ⇒ filter.accept(entity.name)
+
+ } getOrElse (settings.trackUnmatchedEntities)
+
+ def registerHistogram(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement],
+ dynamicRange: Option[DynamicRange]): Histogram = {
+
+ val histogramEntity = Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags)
+ val recorder = _trackedEntities.atomicGetOrElseUpdate(histogramEntity, {
+ val factory = instrumentFactory(histogramEntity.category)
+ HistogramRecorder(
+ HistogramKey(histogramEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)),
+ factory.createHistogram(name, dynamicRange)
+ )
+ }, _.cleanup)
+
+ recorder.asInstanceOf[HistogramRecorder].instrument
+ }
+
+ def removeHistogram(name: String, tags: Map[String, String]): Boolean =
+ _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags)).isDefined
+
+ def registerMinMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement], dynamicRange: Option[DynamicRange],
+ refreshInterval: Option[FiniteDuration]): MinMaxCounter = {
+
+ val minMaxCounterEntity = Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags)
+ val recorder = _trackedEntities.atomicGetOrElseUpdate(minMaxCounterEntity, {
+ val factory = instrumentFactory(minMaxCounterEntity.category)
+ MinMaxCounterRecorder(
+ MinMaxCounterKey(minMaxCounterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)),
+ factory.createMinMaxCounter(name, dynamicRange, refreshInterval)
+ )
+ }, _.cleanup)
+
+ recorder.asInstanceOf[MinMaxCounterRecorder].instrument
+ }
+
+ def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean =
+ _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags)).isDefined
+
+ def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty,
+ unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None,
+ refreshInterval: Option[FiniteDuration] = None): Gauge = {
+
+ val gaugeEntity = Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags)
+ val recorder = _trackedEntities.atomicGetOrElseUpdate(gaugeEntity, {
+ val factory = instrumentFactory(gaugeEntity.category)
+ GaugeRecorder(
+ GaugeKey(gaugeEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)),
+ factory.createGauge(name, dynamicRange, refreshInterval, valueCollector)
+ )
+ }, _.cleanup)
+
+ recorder.asInstanceOf[GaugeRecorder].instrument
+ }
+
+ def removeGauge(name: String, tags: Map[String, String]): Boolean =
+ _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags)).isDefined
+
+ def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None,
+ dynamicRange: Option[DynamicRange] = None): Counter = {
+
+ val counterEntity = Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags)
+ val recorder = _trackedEntities.atomicGetOrElseUpdate(counterEntity, {
+ val factory = instrumentFactory(counterEntity.category)
+ CounterRecorder(
+ CounterKey(counterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)),
+ factory.createCounter()
+ )
+ }, _.cleanup)
+
+ recorder.asInstanceOf[CounterRecorder].instrument
+ }
+
+ def removeCounter(name: String, tags: Map[String, String]): Boolean =
+ _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags)).isDefined
+
+ def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T = {
+ _trackedEntities.atomicGetOrElseUpdate(entity.copy(tags = entity.tags ++ defaultTags), {
+ recorderFactory.createRecorder(instrumentFactory(recorderFactory.category))
+ }, _.cleanup).asInstanceOf[T]
+ }
+
+ def removeEntity(entity: Entity): Boolean = {
+ val removedEntity = _trackedEntities.remove(entity.copy(tags = entity.tags ++ defaultTags))
+ removedEntity.foreach(_.cleanup)
+ removedEntity.isDefined
+ }
+
+ def find(entity: Entity): Option[EntityRecorder] =
+ _trackedEntities.get(entity.copy(tags = entity.tags ++ defaultTags))
+
+ def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit =
+ _subscriptions.tell(Subscribe(filter, subscriber, permanent))
+
+ def unsubscribe(subscriber: ActorRef): Unit =
+ _subscriptions.tell(Unsubscribe(subscriber))
+
+ def buildDefaultCollectionContext: CollectionContext =
+ CollectionContext(settings.defaultCollectionContextBufferSize)
+
+ def instrumentFactory(category: String): InstrumentFactory =
+ settings.instrumentFactories.getOrElse(category, settings.defaultInstrumentFactory)
+
+ private[kamon] def collectSnapshots(collectionContext: CollectionContext): Map[Entity, EntitySnapshot] = {
+ val builder = Map.newBuilder[Entity, EntitySnapshot]
+ _trackedEntities.foreach {
+ case (identity, recorder) ⇒ builder += ((identity, recorder.collect(collectionContext)))
+ }
+
+ builder.result()
+ }
+
+ /**
+ * Metrics Extension initialization.
+ */
+ private var _system: ActorSystem = null
+ private lazy val _start = {
+ _subscriptions.point(_system.actorOf(SubscriptionsDispatcher.props(settings.tickInterval, this), "metrics"))
+ settings.pointScheduler(DefaultRefreshScheduler(_system.scheduler, _system.dispatcher))
+ }
+
+ def start(system: ActorSystem, newConfig: Config): Unit = synchronized {
+ settings = MetricsSettings(newConfig)
+ _system = system
+ _start
+ _system = null
+ }
+}
+
+private[kamon] object MetricsModuleImpl {
+
+ def apply(config: Config) =
+ new MetricsModuleImpl(config)
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala
new file mode 100644
index 00000000..592e8f67
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala
@@ -0,0 +1,123 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import com.typesafe.config.Config
+import kamon.metric.instrument._
+import kamon.util.PathFilter
+import kamon.util.GlobPathFilter
+import kamon.util.RegexPathFilter
+
+import scala.concurrent.duration.FiniteDuration
+
+/**
+ * Configuration settings for the Metrics extension, as read from the `kamon.metric` configuration key.
+ */
+case class MetricsSettings(
+ tickInterval: FiniteDuration,
+ defaultCollectionContextBufferSize: Int,
+ trackUnmatchedEntities: Boolean,
+ entityFilters: Map[String, EntityFilter],
+ instrumentFactories: Map[String, InstrumentFactory],
+ defaultInstrumentFactory: InstrumentFactory,
+ refreshScheduler: RefreshScheduler
+) {
+
+ private[kamon] def pointScheduler(targetScheduler: RefreshScheduler): Unit = refreshScheduler match {
+ case lrs: LazyRefreshScheduler ⇒ lrs.point(targetScheduler)
+ case others ⇒
+ }
+}
+
+/**
+ * A filter over entity names: a name is accepted when it matches at least one include pattern and none of the exclude patterns.
+ */
+case class EntityFilter(includes: List[PathFilter], excludes: List[PathFilter]) {
+ def accept(name: String): Boolean =
+ includes.exists(_.accept(name)) && !excludes.exists(_.accept(name))
+}
+
+object MetricsSettings {
+ import kamon.util.ConfigTools.Syntax
+
+ def apply(config: Config): MetricsSettings = {
+ val metricConfig = config.getConfig("kamon.metric")
+
+ val tickInterval = metricConfig.getFiniteDuration("tick-interval")
+ val collectBufferSize = metricConfig.getInt("default-collection-context-buffer-size")
+ val trackUnmatchedEntities = metricConfig.getBoolean("track-unmatched-entities")
+ val entityFilters = loadFilters(metricConfig.getConfig("filters"))
+ val defaultInstrumentSettings = DefaultInstrumentSettings.fromConfig(metricConfig.getConfig("default-instrument-settings"))
+
+ val refreshScheduler = new LazyRefreshScheduler
+ val instrumentFactories = loadInstrumentFactories(metricConfig.getConfig("instrument-settings"), defaultInstrumentSettings, refreshScheduler)
+ val defaultInstrumentFactory = new InstrumentFactory(Map.empty, defaultInstrumentSettings, refreshScheduler)
+
+ MetricsSettings(tickInterval, collectBufferSize, trackUnmatchedEntities, entityFilters, instrumentFactories,
+ defaultInstrumentFactory, refreshScheduler)
+ }
+
+ /**
+ * Load all the default filters configured under the `kamon.metric.filters` configuration key. All filters are
+ * defined with the entity category as a sub-key of the `kamon.metric.filters` key and two sub-keys to it: includes
+ * and excludes with lists of string glob or regex patterns as values ('asRegex' defaults to false). Example:
+ *
+ * {{{
+ *
+ * kamon.metric.filters {
+ * actor {
+ * includes = ["user/test-actor", "user/service/worker-*"]
+ * excludes = ["user/IO-*"]
+ * asRegex = false
+ * }
+ * }
+ *
+ * }}}
+ *
+ * @return a Map from category name to corresponding entity filter.
+ */
+ def loadFilters(filtersConfig: Config): Map[String, EntityFilter] = {
+ import scala.collection.JavaConverters._
+
+ filtersConfig.firstLevelKeys map { category: String ⇒
+ val asRegex = if (filtersConfig.hasPath(s"$category.asRegex")) filtersConfig.getBoolean(s"$category.asRegex") else false
+ val includes = filtersConfig.getStringList(s"$category.includes").asScala.map(inc ⇒
+ if (asRegex) RegexPathFilter(inc) else new GlobPathFilter(inc)).toList
+ val excludes = filtersConfig.getStringList(s"$category.excludes").asScala.map(exc ⇒
+ if (asRegex) RegexPathFilter(exc) else new GlobPathFilter(exc)).toList
+
+ (category, EntityFilter(includes, excludes))
+ } toMap
+ }
+
+ /**
+ * Load any custom configuration settings defined under the `kamon.metric.instrument-settings` configuration key and
+ * create InstrumentFactories for them.
+ *
+ * @return a Map from category name to InstrumentFactory.
+ */
+ def loadInstrumentFactories(instrumentSettings: Config, defaults: DefaultInstrumentSettings, refreshScheduler: RefreshScheduler): Map[String, InstrumentFactory] = {
+ instrumentSettings.firstLevelKeys.map { category ⇒
+ val categoryConfig = instrumentSettings.getConfig(category)
+ val customSettings = categoryConfig.firstLevelKeys.map { instrumentName ⇒
+ (instrumentName, InstrumentCustomSettings.fromConfig(categoryConfig.getConfig(instrumentName)))
+ } toMap
+
+ (category, new InstrumentFactory(customSettings, defaults, refreshScheduler))
+ } toMap
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala b/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala
new file mode 100644
index 00000000..09bf58ad
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala
@@ -0,0 +1,116 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.actor._
+import kamon.metric.SubscriptionsDispatcher._
+import kamon.util.{MilliTimestamp, GlobPathFilter}
+import scala.concurrent.duration.FiniteDuration
+
+/**
+ * Manages subscriptions to metrics and dispatch snapshots on every tick to all subscribers.
+ */
+private[kamon] class SubscriptionsDispatcher(interval: FiniteDuration, metricsExtension: MetricsModuleImpl) extends Actor {
+ var lastTick = MilliTimestamp.now
+ var oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter]
+ var permanentSubscriptions = Map.empty[ActorRef, SubscriptionFilter]
+ val tickSchedule = context.system.scheduler.schedule(interval, interval, self, Tick)(context.dispatcher)
+ val collectionContext = metricsExtension.buildDefaultCollectionContext
+
+ def receive = {
+ case Tick ⇒ processTick()
+ case Subscribe(filter, subscriber, permanently) ⇒ subscribe(filter, subscriber, permanently)
+ case Unsubscribe(subscriber) ⇒ unsubscribe(subscriber)
+ case Terminated(subscriber) ⇒ unsubscribe(subscriber)
+ }
+
+ def processTick(): Unit =
+ dispatch(metricsExtension.collectSnapshots(collectionContext))
+
+ def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit = {
+ def addSubscription(storage: Map[ActorRef, SubscriptionFilter]): Map[ActorRef, SubscriptionFilter] =
+ storage.updated(subscriber, storage.getOrElse(subscriber, SubscriptionFilter.Empty).combine(filter))
+
+ context.watch(subscriber)
+
+ if (permanent)
+ permanentSubscriptions = addSubscription(permanentSubscriptions)
+ else
+ oneShotSubscriptions = addSubscription(oneShotSubscriptions)
+ }
+
+ def unsubscribe(subscriber: ActorRef): Unit = {
+ permanentSubscriptions = permanentSubscriptions - subscriber
+ oneShotSubscriptions = oneShotSubscriptions - subscriber
+ }
+
+ def dispatch(snapshots: Map[Entity, EntitySnapshot]): Unit = {
+ val currentTick = MilliTimestamp.now
+
+ dispatchSelections(lastTick, currentTick, permanentSubscriptions, snapshots)
+ dispatchSelections(lastTick, currentTick, oneShotSubscriptions, snapshots)
+
+ lastTick = currentTick
+ oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter]
+ }
+
+ def dispatchSelections(lastTick: MilliTimestamp, currentTick: MilliTimestamp, subscriptions: Map[ActorRef, SubscriptionFilter],
+ snapshots: Map[Entity, EntitySnapshot]): Unit = {
+
+ for ((subscriber, filter) ← subscriptions) {
+ val selection = snapshots.filter(group ⇒ filter.accept(group._1))
+ val tickMetrics = TickMetricSnapshot(lastTick, currentTick, selection)
+
+ subscriber ! tickMetrics
+ }
+ }
+}
+
+object SubscriptionsDispatcher {
+ def props(interval: FiniteDuration, metricsExtension: MetricsModuleImpl): Props =
+ Props(new SubscriptionsDispatcher(interval, metricsExtension))
+
+ case object Tick
+ case class Unsubscribe(subscriber: ActorRef)
+ case class Subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean = false)
+ case class TickMetricSnapshot(from: MilliTimestamp, to: MilliTimestamp, metrics: Map[Entity, EntitySnapshot])
+
+}
+
+trait SubscriptionFilter { self ⇒
+
+ def accept(entity: Entity): Boolean
+
+ final def combine(that: SubscriptionFilter): SubscriptionFilter = new SubscriptionFilter {
+ override def accept(entity: Entity): Boolean = self.accept(entity) || that.accept(entity)
+ }
+}
+
+object SubscriptionFilter {
+ val Empty = new SubscriptionFilter {
+ def accept(entity: Entity): Boolean = false
+ }
+
+ def apply(category: String, name: String): SubscriptionFilter = new SubscriptionFilter {
+ val categoryPattern = new GlobPathFilter(category)
+ val namePattern = new GlobPathFilter(name)
+
+ def accept(entity: Entity): Boolean = {
+ categoryPattern.accept(entity.category) && namePattern.accept(entity.name)
+ }
+ }
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala b/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala
new file mode 100644
index 00000000..22557974
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala
@@ -0,0 +1,65 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.actor.{Props, Actor, ActorRef}
+import kamon.Kamon
+import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
+import kamon.metric.TickMetricSnapshotBuffer.FlushBuffer
+import kamon.metric.instrument.CollectionContext
+import kamon.util.MapMerge
+
+import scala.concurrent.duration.FiniteDuration
+
+class TickMetricSnapshotBuffer(flushInterval: FiniteDuration, receiver: ActorRef) extends Actor {
+ import MapMerge.Syntax
+
+ val flushSchedule = context.system.scheduler.schedule(flushInterval, flushInterval, self, FlushBuffer)(context.dispatcher)
+ val collectionContext: CollectionContext = Kamon.metrics.buildDefaultCollectionContext
+
+ def receive = empty
+
+ def empty: Actor.Receive = {
+ case tick: TickMetricSnapshot ⇒ context become (buffering(tick))
+ case FlushBuffer ⇒ // Nothing to flush.
+ }
+
+ def buffering(buffered: TickMetricSnapshot): Actor.Receive = {
+ case TickMetricSnapshot(_, to, tickMetrics) ⇒
+ val combinedMetrics = buffered.metrics.merge(tickMetrics, (l, r) ⇒ l.merge(r, collectionContext))
+ val combinedSnapshot = TickMetricSnapshot(buffered.from, to, combinedMetrics)
+
+ context become (buffering(combinedSnapshot))
+
+ case FlushBuffer ⇒
+ receiver ! buffered
+ context become (empty)
+
+ }
+
+ override def postStop(): Unit = {
+ flushSchedule.cancel()
+ super.postStop()
+ }
+}
+
+object TickMetricSnapshotBuffer {
+ case object FlushBuffer
+
+ def props(flushInterval: FiniteDuration, receiver: ActorRef): Props =
+ Props[TickMetricSnapshotBuffer](new TickMetricSnapshotBuffer(flushInterval, receiver))
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala
new file mode 100644
index 00000000..eaeebb97
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala
@@ -0,0 +1,53 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.metric.instrument.{Time, InstrumentFactory}
+
+class TraceMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+ /**
+ * Histogram of trace elapsed times, recorded in nanoseconds.
+ */
+ val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds)
+ val errors = counter("errors")
+}
+
+object TraceMetrics extends EntityRecorderFactory[TraceMetrics] {
+ def category: String = "trace"
+ def createRecorder(instrumentFactory: InstrumentFactory): TraceMetrics = new TraceMetrics(instrumentFactory)
+
+ // Java API.
+ def factory: EntityRecorderFactory[TraceMetrics] = this
+}
+
+class SegmentMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+ /**
+ * Histogram of segment elapsed times, recorded in nanoseconds.
+ */
+ val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds)
+ val errors = counter("errors")
+}
+
+object SegmentMetrics extends EntityRecorderFactory[SegmentMetrics] {
+ def category: String = "trace-segment"
+ def createRecorder(instrumentFactory: InstrumentFactory): SegmentMetrics = new SegmentMetrics(instrumentFactory)
+
+ // Java API.
+ def factory: EntityRecorderFactory[SegmentMetrics] = this
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala
new file mode 100644
index 00000000..b7ab60de
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala
@@ -0,0 +1,65 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import kamon.jsr166.LongAdder
+
+trait Counter extends Instrument {
+ type SnapshotType = Counter.Snapshot
+
+ def increment(): Unit
+ def increment(times: Long): Unit
+}
+
+object Counter {
+
+ def apply(): Counter = new LongAdderCounter
+ def create(): Counter = apply()
+
+ trait Snapshot extends InstrumentSnapshot {
+ def count: Long
+ def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot
+ def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot
+ }
+}
+
+class LongAdderCounter extends Counter {
+ private val counter = new LongAdder
+
+ def increment(): Unit = counter.increment()
+
+ def increment(times: Long): Unit = {
+ if (times < 0)
+ throw new UnsupportedOperationException("Counters cannot be decremented")
+ counter.add(times)
+ }
+
+ def collect(context: CollectionContext): Counter.Snapshot = CounterSnapshot(counter.sumAndReset())
+
+ def cleanup: Unit = {}
+}
+
+case class CounterSnapshot(count: Long) extends Counter.Snapshot {
+ def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot = that match {
+ case CounterSnapshot(thatCount) ⇒ CounterSnapshot(count + thatCount)
+ case other ⇒ sys.error(s"Cannot merge a CounterSnapshot with the incompatible [${other.getClass.getName}] type.")
+ }
+
+ override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot =
+ CounterSnapshot(from.tryScale(to)(count).toLong)
+
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala
new file mode 100644
index 00000000..39571d3d
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala
@@ -0,0 +1,120 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.util.concurrent.atomic.{AtomicLong, AtomicLongFieldUpdater, AtomicReference}
+
+import akka.actor.Cancellable
+import kamon.metric.instrument.Gauge.CurrentValueCollector
+import kamon.metric.instrument.Histogram.DynamicRange
+
+import scala.concurrent.duration.FiniteDuration
+
+trait Gauge extends Instrument {
+ type SnapshotType = Histogram.Snapshot
+
+ def record(value: Long): Unit
+ def record(value: Long, count: Long): Unit
+ def refreshValue(): Unit
+}
+
+object Gauge {
+
+ def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge = {
+ val underlyingHistogram = Histogram(dynamicRange)
+ val gauge = new HistogramBackedGauge(underlyingHistogram, valueCollector)
+ val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ {
+ gauge.refreshValue()
+ })
+
+ gauge.automaticValueCollectorSchedule.set(refreshValuesSchedule)
+ gauge
+ }
+
+ def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge =
+ apply(dynamicRange, refreshInterval, scheduler, valueCollector)
+
+ trait CurrentValueCollector {
+ def currentValue: Long
+ }
+
+ implicit def functionZeroAsCurrentValueCollector(f: () ⇒ Long): CurrentValueCollector = new CurrentValueCollector {
+ def currentValue: Long = f.apply()
+ }
+
+ implicit def callByNameLongAsCurrentValueCollector(f: ⇒ Long): CurrentValueCollector = new CurrentValueCollector {
+ def currentValue: Long = f
+ }
+}
+
+/**
+ * Helper for cases in which a gauge shouldn't store the current value of an observed value but the difference between
+ * the current observed value and the previously observed value. Should only be used if the observed value is always
+ * increasing or staying steady, but is never able to decrease.
+ *
+ * Note: The first time a value is collected, this wrapper will always return zero, afterwards, the difference between
+ * the current value and the last value will be returned.
+ */
+class DifferentialValueCollector(wrappedValueCollector: CurrentValueCollector) extends CurrentValueCollector {
+ @volatile private var _readAtLeastOnce = false
+ private val _lastObservedValue = new AtomicLong(0)
+
+ def currentValue: Long = {
+ if (_readAtLeastOnce) {
+ val wrappedCurrent = wrappedValueCollector.currentValue
+ val diff = wrappedCurrent - _lastObservedValue.getAndSet(wrappedCurrent)
+
+ if (diff >= 0) diff else 0L
+
+ } else {
+ _lastObservedValue.set(wrappedValueCollector.currentValue)
+ _readAtLeastOnce = true
+ 0L
+ }
+
+ }
+}
+
+object DifferentialValueCollector {
+ def apply(wrappedValueCollector: CurrentValueCollector): CurrentValueCollector =
+ new DifferentialValueCollector(wrappedValueCollector)
+
+ def apply(wrappedValueCollector: ⇒ Long): CurrentValueCollector =
+ new DifferentialValueCollector(new CurrentValueCollector {
+ def currentValue: Long = wrappedValueCollector
+ })
+}
+
+class HistogramBackedGauge(underlyingHistogram: Histogram, currentValueCollector: Gauge.CurrentValueCollector) extends Gauge {
+ private[kamon] val automaticValueCollectorSchedule = new AtomicReference[Cancellable]()
+
+ def record(value: Long): Unit = underlyingHistogram.record(value)
+
+ def record(value: Long, count: Long): Unit = underlyingHistogram.record(value, count)
+
+ def collect(context: CollectionContext): Histogram.Snapshot = underlyingHistogram.collect(context)
+
+ def cleanup: Unit = {
+ if (automaticValueCollectorSchedule.get() != null)
+ automaticValueCollectorSchedule.get().cancel()
+ }
+
+ def refreshValue(): Unit =
+ underlyingHistogram.record(currentValueCollector.currentValue)
+
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala
new file mode 100644
index 00000000..399f0880
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala
@@ -0,0 +1,331 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.nio.LongBuffer
+
+import kamon.metric.instrument.Histogram.{DynamicRange, Snapshot}
+import kamon.util.logger.LazyLogger
+import org.HdrHistogram.ModifiedAtomicHistogram
+
+trait Histogram extends Instrument {
+ type SnapshotType = Histogram.Snapshot
+
+ def record(value: Long): Unit
+ def record(value: Long, count: Long): Unit
+}
+
+object Histogram {
+
+ /**
+ * Scala API:
+ *
+ * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given
+ * [[kamon.metric.instrument.Histogram.DynamicRange]].
+ */
+ def apply(dynamicRange: DynamicRange): Histogram = new HdrHistogram(dynamicRange)
+
+ /**
+ * Java API:
+ *
+ * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given
+ * [[kamon.metric.instrument.Histogram.DynamicRange]].
+ */
+ def create(dynamicRange: DynamicRange): Histogram = apply(dynamicRange)
+
+ /**
+ * DynamicRange is a configuration object used to supply range and precision configuration to a
+ * [[kamon.metric.instrument.HdrHistogram]]. See the [[http://hdrhistogram.github.io/HdrHistogram/ HdrHistogram website]]
+ * for more details on how it works and the effects of these configuration values.
+ *
+ * @param lowestDiscernibleValue
+ * The lowest value that can be discerned (distinguished from 0) by the histogram. Must be a positive integer that
+ * is >= 1. May be internally rounded down to nearest power of 2.
+ * @param highestTrackableValue
+ * The highest value to be tracked by the histogram. Must be a positive integer that is >= (2 * lowestDiscernibleValue).
+ * Must not be larger than (Long.MAX_VALUE/2).
+ * @param precision
+ * The number of significant decimal digits to which the histogram will maintain value resolution and separation.
+ * Must be an integer between 1 and 3.
+ */
+ case class DynamicRange(lowestDiscernibleValue: Long, highestTrackableValue: Long, precision: Int)
+
+ trait Record {
+ def level: Long
+ def count: Long
+
+ private[kamon] def rawCompactRecord: Long
+ }
+
+ case class MutableRecord(var level: Long, var count: Long) extends Record {
+ var rawCompactRecord: Long = 0L
+ }
+
+ trait Snapshot extends InstrumentSnapshot {
+
+ def isEmpty: Boolean = numberOfMeasurements == 0
+ def numberOfMeasurements: Long
+ def min: Long
+ def max: Long
+ def sum: Long
+ def percentile(percentile: Double): Long
+ def recordsIterator: Iterator[Record]
+ def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot
+ def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot
+
+ override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot =
+ new ScaledSnapshot(from, to, this)
+ }
+
+ class ScaledSnapshot(from: UnitOfMeasurement, to: UnitOfMeasurement, snapshot: Snapshot) extends Snapshot {
+ private def doScale(v: Long) = from.tryScale(to)(v).toLong
+ override def numberOfMeasurements: Long = snapshot.numberOfMeasurements
+
+ override def max: Long = doScale(snapshot.max)
+
+ override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = snapshot.merge(that, context)
+
+ override def merge(that: Snapshot, context: CollectionContext): Snapshot = snapshot.merge(that, context)
+
+ override def percentile(percentile: Double): Long = doScale(snapshot.percentile(percentile))
+
+ override def min: Long = doScale(snapshot.min)
+
+ override def sum: Long = doScale(snapshot.sum)
+
+ override def recordsIterator: Iterator[Record] = {
+ snapshot.recordsIterator.map(record ⇒ new Record {
+ override def count: Long = record.count
+
+ override def level: Long = doScale(record.level)
+
+ override private[kamon] def rawCompactRecord: Long = record.rawCompactRecord
+ })
+ }
+
+ override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot =
+ if (this.from == from && this.to == to) this else super.scale(from, to)
+ }
+
+ object Snapshot {
+ val empty = new Snapshot {
+ override def min: Long = 0L
+ override def max: Long = 0L
+ override def sum: Long = 0L
+ override def percentile(percentile: Double): Long = 0L
+ override def recordsIterator: Iterator[Record] = Iterator.empty
+ override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = that
+ override def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot = that
+ override def numberOfMeasurements: Long = 0L
+ override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot = this
+ }
+ }
+}
+
+object HdrHistogram {
+ private val log = LazyLogger(classOf[HdrHistogram])
+}
+
+/**
+ * This implementation is meant to be used for real time data collection where data snapshots are taken often over time.
+ * The collect(..) operation extracts all the recorded values from the histogram and resets the counts, but still
+ * leave it in a consistent state even in the case of concurrent modification while the snapshot is being taken.
+ */
+class HdrHistogram(dynamicRange: DynamicRange) extends ModifiedAtomicHistogram(
+ dynamicRange.lowestDiscernibleValue,
+ dynamicRange.highestTrackableValue, dynamicRange.precision
+) with Histogram {
+ import HdrHistogram.log
+
+ def record(value: Long): Unit = tryRecord(value, 1L)
+
+ def record(value: Long, count: Long): Unit = tryRecord(value, count)
+
+ private def tryRecord(value: Long, count: Long): Unit = {
+ try {
+ recordValueWithCount(value, count)
+ } catch {
+ case anyException: Throwable ⇒
+ log.warn(s"Failed to store value $value in HdrHistogram, please review your range configuration.", anyException)
+ }
+ }
+
+ def collect(context: CollectionContext): Histogram.Snapshot = {
+ import context.buffer
+ buffer.clear()
+ val nrOfMeasurements = writeSnapshotTo(buffer)
+
+ buffer.flip()
+
+ val measurementsArray = Array.ofDim[Long](buffer.limit())
+ buffer.get(measurementsArray, 0, measurementsArray.length)
+ new CompactHdrSnapshot(nrOfMeasurements, measurementsArray, protectedUnitMagnitude(), protectedSubBucketHalfCount(), protectedSubBucketHalfCountMagnitude())
+ }
+
+ def getCounts = countsArray().length()
+
+ def cleanup: Unit = {}
+
+ private def writeSnapshotTo(buffer: LongBuffer): Long = {
+ val counts = countsArray()
+ val countsLength = counts.length()
+
+ var nrOfMeasurements = 0L
+ var index = 0L
+ while (index < countsLength) {
+ val countAtIndex = counts.getAndSet(index.toInt, 0L)
+
+ if (countAtIndex > 0) {
+ buffer.put(CompactHdrSnapshot.compactRecord(index, countAtIndex))
+ nrOfMeasurements += countAtIndex
+ }
+
+ index += 1
+ }
+ nrOfMeasurements
+ }
+}
+
+case class CompactHdrSnapshot(numberOfMeasurements: Long, compactRecords: Array[Long], unitMagnitude: Int,
+ subBucketHalfCount: Int, subBucketHalfCountMagnitude: Int) extends Histogram.Snapshot {
+
+ def min: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(0))
+ def max: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(compactRecords.length - 1))
+ def sum: Long = recordsIterator.foldLeft(0L)((a, r) ⇒ a + (r.count * r.level))
+
+ def percentile(p: Double): Long = {
+ val records = recordsIterator
+ val threshold = numberOfMeasurements * (p / 100D)
+ var countToCurrentLevel = 0L
+ var percentileLevel = 0L
+
+ while (countToCurrentLevel < threshold && records.hasNext) {
+ val record = records.next()
+ countToCurrentLevel += record.count
+ percentileLevel = record.level
+ }
+
+ percentileLevel
+ }
+
+ def merge(that: Histogram.Snapshot, context: CollectionContext): Snapshot =
+ merge(that.asInstanceOf[InstrumentSnapshot], context)
+
+ def merge(that: InstrumentSnapshot, context: CollectionContext): Histogram.Snapshot = that match {
+ case thatSnapshot: CompactHdrSnapshot ⇒
+ if (thatSnapshot.isEmpty) this else if (this.isEmpty) thatSnapshot else {
+ import context.buffer
+ buffer.clear()
+
+ val selfIterator = recordsIterator
+ val thatIterator = thatSnapshot.recordsIterator
+ var thatCurrentRecord: Histogram.Record = null
+ var mergedNumberOfMeasurements = 0L
+
+ def nextOrNull(iterator: Iterator[Histogram.Record]): Histogram.Record = if (iterator.hasNext) iterator.next() else null
+ def addToBuffer(compactRecord: Long): Unit = {
+ mergedNumberOfMeasurements += countFromCompactRecord(compactRecord)
+ buffer.put(compactRecord)
+ }
+
+ while (selfIterator.hasNext) {
+ val selfCurrentRecord = selfIterator.next()
+
+ // Advance that to no further than the level of selfCurrentRecord
+ thatCurrentRecord = if (thatCurrentRecord == null) nextOrNull(thatIterator) else thatCurrentRecord
+ while (thatCurrentRecord != null && thatCurrentRecord.level < selfCurrentRecord.level) {
+ addToBuffer(thatCurrentRecord.rawCompactRecord)
+ thatCurrentRecord = nextOrNull(thatIterator)
+ }
+
+ // Include the current record of self and optionally merge if has the same level as thatCurrentRecord
+ if (thatCurrentRecord != null && thatCurrentRecord.level == selfCurrentRecord.level) {
+ addToBuffer(mergeCompactRecords(thatCurrentRecord.rawCompactRecord, selfCurrentRecord.rawCompactRecord))
+ thatCurrentRecord = nextOrNull(thatIterator)
+ } else {
+ addToBuffer(selfCurrentRecord.rawCompactRecord)
+ }
+ }
+
+ // Include everything that might have been left from that
+ if (thatCurrentRecord != null) addToBuffer(thatCurrentRecord.rawCompactRecord)
+ while (thatIterator.hasNext) {
+ addToBuffer(thatIterator.next().rawCompactRecord)
+ }
+
+ buffer.flip()
+ val compactRecords = Array.ofDim[Long](buffer.limit())
+ buffer.get(compactRecords)
+
+ new CompactHdrSnapshot(mergedNumberOfMeasurements, compactRecords, unitMagnitude, subBucketHalfCount, subBucketHalfCountMagnitude)
+ }
+
+ case other ⇒
+ sys.error(s"Cannot merge a CompactHdrSnapshot with the incompatible [${other.getClass.getName}] type.")
+
+ }
+
+ @inline private def mergeCompactRecords(left: Long, right: Long): Long = {
+ val index = left >> 48
+ val leftCount = countFromCompactRecord(left)
+ val rightCount = countFromCompactRecord(right)
+
+ CompactHdrSnapshot.compactRecord(index, leftCount + rightCount)
+ }
+
+ @inline private def levelFromCompactRecord(compactRecord: Long): Long = {
+ val countsArrayIndex = (compactRecord >> 48).toInt
+ var bucketIndex: Int = (countsArrayIndex >> subBucketHalfCountMagnitude) - 1
+ var subBucketIndex: Int = (countsArrayIndex & (subBucketHalfCount - 1)) + subBucketHalfCount
+ if (bucketIndex < 0) {
+ subBucketIndex -= subBucketHalfCount
+ bucketIndex = 0
+ }
+
+ subBucketIndex.toLong << (bucketIndex + unitMagnitude)
+ }
+
+ @inline private def countFromCompactRecord(compactRecord: Long): Long =
+ compactRecord & CompactHdrSnapshot.CompactRecordCountMask
+
+ def recordsIterator: Iterator[Histogram.Record] = new Iterator[Histogram.Record] {
+ var currentIndex = 0
+ val mutableRecord = Histogram.MutableRecord(0, 0)
+
+ override def hasNext: Boolean = currentIndex < compactRecords.length
+
+ override def next(): Histogram.Record = {
+ if (hasNext) {
+ val measurement = compactRecords(currentIndex)
+ mutableRecord.rawCompactRecord = measurement
+ mutableRecord.level = levelFromCompactRecord(measurement)
+ mutableRecord.count = countFromCompactRecord(measurement)
+ currentIndex += 1
+
+ mutableRecord
+ } else {
+ throw new IllegalStateException("The iterator has already been consumed.")
+ }
+ }
+ }
+}
+
+object CompactHdrSnapshot {
+ val CompactRecordCountMask = 0xFFFFFFFFFFFFL
+
+ def compactRecord(index: Long, count: Long): Long = (index << 48) | count
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala
new file mode 100644
index 00000000..2c4b4319
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala
@@ -0,0 +1,51 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.nio.LongBuffer
+
+// Base contract for all Kamon instruments: an instrument can produce a
+// point-in-time snapshot of its measurements and release its resources.
+private[kamon] trait Instrument {
+ type SnapshotType <: InstrumentSnapshot
+
+ // Takes a snapshot of the instrument, using the context's buffer as scratch space.
+ def collect(context: CollectionContext): SnapshotType
+ // Releases any resources held by the instrument (e.g. scheduled refreshes).
+ def cleanup: Unit
+}
+
+// Immutable view of an instrument's state; snapshots can be combined and
+// rescaled to a different unit of measurement.
+trait InstrumentSnapshot {
+ def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot
+
+ def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): InstrumentSnapshot
+}
+
+// Scratch state shared across collect() calls; the LongBuffer is reused to
+// avoid allocating a fresh buffer on every snapshot.
+trait CollectionContext {
+ def buffer: LongBuffer
+}
+
+object CollectionContext {
+ // Creates a context backed by a heap LongBuffer of the given capacity.
+ def apply(longBufferSize: Int): CollectionContext = new CollectionContext {
+ val buffer: LongBuffer = LongBuffer.allocate(longBufferSize)
+ }
+}
+
+// Enumeration (as a sealed ADT) of the instrument kinds Kamon provides.
+sealed trait InstrumentType
+
+object InstrumentTypes {
+ case object Histogram extends InstrumentType
+ case object MinMaxCounter extends InstrumentType
+ case object Gauge extends InstrumentType
+ case object Counter extends InstrumentType
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala
new file mode 100644
index 00000000..7c0201f7
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala
@@ -0,0 +1,51 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import kamon.metric.instrument.Gauge.CurrentValueCollector
+import kamon.metric.instrument.Histogram.DynamicRange
+
+import scala.concurrent.duration.FiniteDuration
+
+// Builds instruments by resolving settings with precedence: per-instrument
+// configuration overrides > code-supplied settings > library defaults.
+case class InstrumentFactory(configurations: Map[String, InstrumentCustomSettings], defaults: DefaultInstrumentSettings, scheduler: RefreshScheduler) {
+
+ // Resolves the effective settings for `instrumentName`. Custom (config)
+ // settings are combined on top of code settings when both exist, otherwise
+ // on top of the defaults; with no custom settings, code settings win.
+ private def resolveSettings(instrumentName: String, codeSettings: Option[InstrumentSettings], default: InstrumentSettings): InstrumentSettings = {
+ configurations.get(instrumentName).flatMap { customSettings ⇒
+ codeSettings.map(cs ⇒ customSettings.combine(cs)) orElse (Some(customSettings.combine(default)))
+
+ } getOrElse (codeSettings.getOrElse(default))
+ }
+
+ def createHistogram(name: String, dynamicRange: Option[DynamicRange] = None): Histogram = {
+ val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, None)), defaults.histogram)
+ Histogram(settings.dynamicRange)
+ }
+
+ def createMinMaxCounter(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter = {
+ val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.minMaxCounter)
+ // NOTE(review): `.get` throws if neither the resolved settings nor the
+ // defaults carry a refresh interval — relies on defaults always having one.
+ MinMaxCounter(settings.dynamicRange, settings.refreshInterval.get, scheduler)
+ }
+
+ def createGauge(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None,
+ valueCollector: CurrentValueCollector): Gauge = {
+
+ val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.gauge)
+ // NOTE(review): same `.get` assumption as createMinMaxCounter above.
+ Gauge(settings.dynamicRange, settings.refreshInterval.get, scheduler, valueCollector)
+ }
+
+ def createCounter(): Counter = Counter()
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala
new file mode 100644
index 00000000..e4d6f547
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala
@@ -0,0 +1,73 @@
+package kamon.metric.instrument
+
+import com.typesafe.config.Config
+import kamon.metric.instrument.Histogram.DynamicRange
+
+import scala.concurrent.duration.FiniteDuration
+
+// Partial, user-provided overrides for an instrument's settings; any field
+// left as None falls back to the value being combined with.
+case class InstrumentCustomSettings(lowestDiscernibleValue: Option[Long], highestTrackableValue: Option[Long],
+ precision: Option[Int], refreshInterval: Option[FiniteDuration]) {
+
+ // Applies these overrides on top of `that`, field by field.
+ def combine(that: InstrumentSettings): InstrumentSettings =
+ InstrumentSettings(
+ DynamicRange(
+ lowestDiscernibleValue.getOrElse(that.dynamicRange.lowestDiscernibleValue),
+ highestTrackableValue.getOrElse(that.dynamicRange.highestTrackableValue),
+ precision.getOrElse(that.dynamicRange.precision)
+ ),
+ refreshInterval.orElse(that.refreshInterval)
+ )
+}
+
+object InstrumentCustomSettings {
+ import kamon.util.ConfigTools.Syntax
+
+ // Reads optional override fields from a Typesafe Config block; missing paths
+ // become None so they do not override anything when combined.
+ def fromConfig(config: Config): InstrumentCustomSettings =
+ InstrumentCustomSettings(
+ if (config.hasPath("lowest-discernible-value")) Some(config.getLong("lowest-discernible-value")) else None,
+ if (config.hasPath("highest-trackable-value")) Some(config.getLong("highest-trackable-value")) else None,
+ if (config.hasPath("precision")) Some(InstrumentSettings.parsePrecision(config.getString("precision"))) else None,
+ if (config.hasPath("refresh-interval")) Some(config.getFiniteDuration("refresh-interval")) else None
+ )
+
+}
+
+// Fully-resolved settings for one instrument: its HdrHistogram dynamic range
+// and, for refreshing instruments (gauge/min-max counter), a refresh interval.
+case class InstrumentSettings(dynamicRange: DynamicRange, refreshInterval: Option[FiniteDuration])
+
+object InstrumentSettings {
+
+ // Reads the three dynamic-range fields from config; all are mandatory here.
+ def readDynamicRange(config: Config): DynamicRange =
+ DynamicRange(
+ config.getLong("lowest-discernible-value"),
+ config.getLong("highest-trackable-value"),
+ parsePrecision(config.getString("precision"))
+ )
+
+ // Maps the human-readable precision names onto HdrHistogram significant
+ // value digits (1 to 3); any other string aborts with an error.
+ def parsePrecision(stringValue: String): Int = stringValue match {
+ case "low" ⇒ 1
+ case "normal" ⇒ 2
+ case "fine" ⇒ 3
+ case other ⇒ sys.error(s"Invalid precision configuration [$other] found, valid options are: [low|normal|fine].")
+ }
+}
+
+// Library-wide default settings for each instrument type, loaded once from
+// the `kamon.metric.instrument-settings` style configuration block.
+case class DefaultInstrumentSettings(histogram: InstrumentSettings, minMaxCounter: InstrumentSettings, gauge: InstrumentSettings)
+
+object DefaultInstrumentSettings {
+
+ // Reads defaults from config. Histograms never refresh (None); min-max
+ // counters and gauges must provide a refresh-interval in their config blocks.
+ def fromConfig(config: Config): DefaultInstrumentSettings = {
+ import kamon.util.ConfigTools.Syntax
+
+ val histogramSettings = InstrumentSettings(InstrumentSettings.readDynamicRange(config.getConfig("histogram")), None)
+ val minMaxCounterSettings = InstrumentSettings(
+ InstrumentSettings.readDynamicRange(config.getConfig("min-max-counter")),
+ Some(config.getFiniteDuration("min-max-counter.refresh-interval"))
+ )
+ val gaugeSettings = InstrumentSettings(
+ InstrumentSettings.readDynamicRange(config.getConfig("gauge")),
+ Some(config.getFiniteDuration("gauge.refresh-interval"))
+ )
+
+ DefaultInstrumentSettings(histogramSettings, minMaxCounterSettings, gaugeSettings)
+ }
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala
new file mode 100644
index 00000000..76fc2c2a
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala
@@ -0,0 +1,105 @@
+package kamon.metric.instrument
+
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+import java.lang.Math.abs
+import java.util.concurrent.atomic.AtomicReference
+import akka.actor.Cancellable
+import kamon.jsr166.LongMaxUpdater
+import kamon.metric.instrument.Histogram.DynamicRange
+import kamon.util.PaddedAtomicLong
+import scala.concurrent.duration.FiniteDuration
+
+// A counter that additionally tracks the minimum and maximum values observed
+// between refreshes; snapshots are exposed as histogram snapshots.
+trait MinMaxCounter extends Instrument {
+ override type SnapshotType = Histogram.Snapshot
+
+ def increment(): Unit
+ def increment(times: Long): Unit
+ def decrement(): Unit
+ def decrement(times: Long): Unit
+ // Flushes current/min/max into the backing histogram and resets the trackers.
+ def refreshValues(): Unit
+}
+
+object MinMaxCounter {
+
+ // Builds a PaddedMinMaxCounter backed by a histogram and wires a periodic
+ // refreshValues() task on the provided scheduler; the Cancellable is kept so
+ // cleanup can stop it.
+ def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter = {
+ val underlyingHistogram = Histogram(dynamicRange)
+ val minMaxCounter = new PaddedMinMaxCounter(underlyingHistogram)
+ val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ {
+ minMaxCounter.refreshValues()
+ })
+
+ minMaxCounter.refreshValuesSchedule.set(refreshValuesSchedule)
+ minMaxCounter
+ }
+
+ // Java-friendly alias of apply.
+ def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter =
+ apply(dynamicRange, refreshInterval, scheduler)
+
+}
+
+// MinMaxCounter implementation using padded atomics to reduce false sharing.
+// The minimum is tracked as a maximum of negated values (LongMaxUpdater only
+// tracks maxima), hence the sign flips below.
+class PaddedMinMaxCounter(underlyingHistogram: Histogram) extends MinMaxCounter {
+ private val min = new LongMaxUpdater(0L)
+ private val max = new LongMaxUpdater(0L)
+ private val sum = new PaddedAtomicLong
+ val refreshValuesSchedule = new AtomicReference[Cancellable]()
+
+ def increment(): Unit = increment(1L)
+
+ def increment(times: Long): Unit = {
+ val currentValue = sum.addAndGet(times)
+ max.update(currentValue)
+ }
+
+ def decrement(): Unit = decrement(1L)
+
+ def decrement(times: Long): Unit = {
+ val currentValue = sum.addAndGet(-times)
+ // Track the minimum by feeding the negated value into a max tracker.
+ min.update(-currentValue)
+ }
+
+ def collect(context: CollectionContext): Histogram.Snapshot = {
+ // Refresh first so the snapshot includes up-to-date current/min/max samples.
+ refreshValues()
+ underlyingHistogram.collect(context)
+ }
+
+ // Cancels the periodic refresh task, if one was ever attached.
+ def cleanup: Unit = {
+ if (refreshValuesSchedule.get() != null)
+ refreshValuesSchedule.get().cancel()
+ }
+
+ // Records three samples into the histogram per refresh: the clamped current
+ // value, the minimum and the maximum observed since the previous refresh.
+ def refreshValues(): Unit = {
+ val currentValue = {
+ val value = sum.get()
+ // Negative totals are clamped to zero (more decrements than increments).
+ if (value <= 0) 0 else value
+ }
+
+ val currentMin = {
+ val rawMin = min.maxThenReset(-currentValue)
+ if (rawMin >= 0)
+ 0
+ else
+ abs(rawMin)
+ }
+
+ val currentMax = max.maxThenReset(currentValue)
+
+ underlyingHistogram.record(currentValue)
+ underlyingHistogram.record(currentMin)
+ underlyingHistogram.record(currentMax)
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala
new file mode 100644
index 00000000..eb01d114
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala
@@ -0,0 +1,31 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package org.HdrHistogram
+
+import java.util.concurrent.atomic.AtomicLongArray
+
+// Lives in org.HdrHistogram to gain access to AtomicHistogram's protected
+// members. Disables total-count bookkeeping (Kamon derives totals from the
+// counts array) and exposes the internals Kamon's snapshots need.
+abstract class ModifiedAtomicHistogram(low: Long, high: Long, precision: Int)
+ extends AtomicHistogram(low, high, precision) { self ⇒
+
+ override def incrementTotalCount(): Unit = {}
+ override def addToTotalCount(value: Long): Unit = {}
+
+ // Accessors widening protected HdrHistogram state for snapshot encoding.
+ def countsArray(): AtomicLongArray = counts
+ def protectedUnitMagnitude(): Int = unitMagnitude
+ def protectedSubBucketHalfCount(): Int = subBucketHalfCount
+ def protectedSubBucketHalfCountMagnitude(): Int = subBucketHalfCountMagnitude
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala
new file mode 100644
index 00000000..6bc02dc3
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala
@@ -0,0 +1,115 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import akka.actor.{Scheduler, Cancellable}
+import org.HdrHistogram.WriterReaderPhaser
+
+import scala.collection.concurrent.TrieMap
+import scala.concurrent.ExecutionContext
+import scala.concurrent.duration.FiniteDuration
+
+// Abstraction over the mechanism used to run periodic instrument refreshes.
+trait RefreshScheduler {
+ def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable
+}
+
+/**
+ * Default implementation of RefreshScheduler that simply uses an [[akka.actor.Scheduler]] to schedule tasks to be run
+ * in the provided ExecutionContext.
+ */
+class DefaultRefreshScheduler(scheduler: Scheduler, dispatcher: ExecutionContext) extends RefreshScheduler {
+ // Fixed-delay periodic schedule: first run after `interval`, then every `interval`.
+ def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable =
+ scheduler.schedule(interval, interval)(refresh.apply())(dispatcher)
+}
+
+object DefaultRefreshScheduler {
+ def apply(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler =
+ new DefaultRefreshScheduler(scheduler, dispatcher)
+
+ // Java-friendly alias of apply.
+ def create(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler =
+ apply(scheduler, dispatcher)
+}
+
+/**
+ * RefreshScheduler implementation that accumulates all the scheduled actions until it is pointed to another refresh
+ * scheduler. Once it is pointed, all subsequent calls to `schedule` will immediately be scheduled in the pointed
+ * scheduler.
+ */
+/**
+ * RefreshScheduler implementation that accumulates all the scheduled actions until it is pointed to another refresh
+ * scheduler. Once it is pointed, all subsequent calls to `schedule` will immediately be scheduled in the pointed
+ * scheduler.
+ */
+class LazyRefreshScheduler extends RefreshScheduler {
+ // WriterReaderPhaser coordinates schedule() callers (writers) against the
+ // one-time point() handoff (reader) without blocking the writers.
+ private val _schedulerPhaser = new WriterReaderPhaser
+ private val _backlog = new TrieMap[(FiniteDuration, () ⇒ Unit), RepointableCancellable]()
+ @volatile private var _target: Option[RefreshScheduler] = None
+
+ // Before point(): enqueue the task in the backlog and hand back a
+ // RepointableCancellable. After point(): delegate directly to the target.
+ def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable = {
+ val criticalEnter = _schedulerPhaser.writerCriticalSectionEnter()
+ try {
+ _target.map { scheduler ⇒
+ scheduler.schedule(interval, refresh)
+
+ } getOrElse {
+ val entry = (interval, refresh)
+ val cancellable = new RepointableCancellable(entry)
+
+ _backlog.put(entry, cancellable)
+ cancellable
+ }
+
+ } finally {
+ _schedulerPhaser.writerCriticalSectionExit(criticalEnter)
+ }
+ }
+
+ // One-shot handoff: flips the phaser so in-flight schedule() calls settle,
+ // then drains the backlog into `target`. A second call is an error.
+ def point(target: RefreshScheduler): Unit = try {
+ _schedulerPhaser.readerLock()
+
+ if (_target.isEmpty) {
+ _target = Some(target)
+ _schedulerPhaser.flipPhase(10000L)
+ _backlog.dropWhile {
+ case ((interval, refresh), repointableCancellable) ⇒
+ repointableCancellable.point(target.schedule(interval, refresh))
+ true
+ }
+ } else sys.error("A LazyRefreshScheduler cannot be pointed more than once.")
+ } finally { _schedulerPhaser.readerUnlock() }
+
+ // Cancellable handed out while the task is still in the backlog; once the
+ // scheduler is pointed, it proxies to the real Cancellable (and honors a
+ // cancel() that happened before the handoff).
+ class RepointableCancellable(entry: (FiniteDuration, () ⇒ Unit)) extends Cancellable {
+ private var _isCancelled = false
+ private var _cancellable: Option[Cancellable] = None
+
+ def isCancelled: Boolean = synchronized {
+ _cancellable.map(_.isCancelled).getOrElse(_isCancelled)
+ }
+
+ def cancel(): Boolean = synchronized {
+ _isCancelled = true
+ // Before pointing, cancelling means removing the entry from the backlog.
+ _cancellable.map(_.cancel()).getOrElse(_backlog.remove(entry).nonEmpty)
+ }
+
+ def point(cancellable: Cancellable): Unit = synchronized {
+ if (_cancellable.isEmpty) {
+ _cancellable = Some(cancellable)
+
+ // Propagate a cancellation that arrived before the handoff.
+ if (_isCancelled)
+ cancellable.cancel()
+
+ } else sys.error("A RepointableCancellable cannot be pointed more than once.")
+
+ }
+ }
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala
new file mode 100644
index 00000000..5952b906
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala
@@ -0,0 +1,109 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+/**
+ * A UnitOfMeasurement implementation describes the magnitude of a quantity being measured, such as Time and computer
+ * Memory space. Kamon uses UnitOfMeasurement implementations just as a informative companion to metrics inside entity
+ * recorders and might be used to scale certain kinds of measurements in metric backends.
+ */
+/**
+ * A UnitOfMeasurement implementation describes the magnitude of a quantity being measured, such as Time and computer
+ * Memory space. Kamon uses UnitOfMeasurement implementations just as a informative companion to metrics inside entity
+ * recorders and might be used to scale certain kinds of measurements in metric backends.
+ */
+trait UnitOfMeasurement {
+ // Self-type of compatible units; scale() is only defined within one family.
+ type U <: UnitOfMeasurement
+
+ def name: String
+ def label: String
+ // Identity by default; concrete families (Time, Memory) override with a
+ // factor-based conversion.
+ def scale(toUnit: U)(value: Double): Double = value
+
+ // Checked conversion: scales when both units belong to the same family,
+ // otherwise fails fast instead of producing a silently-wrong number.
+ def tryScale(toUnit: UnitOfMeasurement)(value: Double): Double =
+ if (canScale(toUnit)) scale(toUnit.asInstanceOf[U])(value)
+ else throw new IllegalArgumentException(s"Can't scale different types of units `$name` and `${toUnit.name}`")
+
+ protected def canScale(toUnit: UnitOfMeasurement): Boolean
+
+}
+
+object UnitOfMeasurement {
+ // Placeholder unit for metrics without a meaningful magnitude; only
+ // "scales" (as identity) to itself.
+ case object Unknown extends UnitOfMeasurement {
+ override type U = Unknown.type
+ val name = "unknown"
+ val label = "unknown"
+
+ override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isUnknown(toUnit)
+ }
+
+ def isUnknown(uom: UnitOfMeasurement): Boolean =
+ uom == Unknown
+
+ def isTime(uom: UnitOfMeasurement): Boolean =
+ uom.isInstanceOf[Time]
+
+ def isMemory(uom: UnitOfMeasurement): Boolean =
+ uom.isInstanceOf[Memory]
+
+}
+
+/**
+ * UnitOfMeasurement representing time.
+ */
+/**
+ * UnitOfMeasurement representing time. `factor` is the unit's size in seconds,
+ * so conversion is (value * factor) / target.factor.
+ */
+case class Time(factor: Double, label: String) extends UnitOfMeasurement {
+ override type U = Time
+ val name = "time"
+
+ override def scale(toUnit: Time)(value: Double): Double =
+ (value * factor) / toUnit.factor
+
+ override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isTime(toUnit)
+}
+
+object Time {
+ // NOTE(review): the nanoseconds label is "n", not the conventional "ns" —
+ // Time("ns") therefore does NOT resolve. Looks like a typo, but changing it
+ // would alter lookup behavior; confirm before fixing.
+ val Nanoseconds = Time(1E-9, "n")
+ val Microseconds = Time(1E-6, "µs")
+ val Milliseconds = Time(1E-3, "ms")
+ val Seconds = Time(1, "s")
+
+ val units = List(Nanoseconds, Microseconds, Milliseconds, Seconds)
+
+ // Case-insensitive lookup by label; unknown labels fail fast.
+ def apply(time: String): Time = units.find(_.label.toLowerCase == time.toLowerCase) getOrElse {
+ throw new IllegalArgumentException(s"Can't recognize time unit '$time'")
+ }
+}
+
+/**
+ * UnitOfMeasurement representing computer memory space.
+ */
+/**
+ * UnitOfMeasurement representing computer memory space. `factor` is the unit's
+ * size in bytes (binary multiples of 1024).
+ */
+case class Memory(factor: Double, label: String) extends UnitOfMeasurement {
+ override type U = Memory
+ val name = "bytes"
+
+ override def scale(toUnit: Memory)(value: Double): Double =
+ (value * factor) / toUnit.factor
+
+ override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isMemory(toUnit)
+}
+
+object Memory {
+ val Bytes = Memory(1, "b")
+ val KiloBytes = Memory(1024, "Kb")
+ val MegaBytes = Memory(1024 * 1024, "Mb")
+ val GigaBytes = Memory(1024 * 1024 * 1024, "Gb")
+
+ val units = List(Bytes, KiloBytes, MegaBytes, GigaBytes)
+
+ // Case-insensitive lookup by label; unknown labels fail fast.
+ def apply(memory: String): Memory = units.find(_.label.toLowerCase == memory.toLowerCase) getOrElse {
+ throw new IllegalArgumentException(s"Can't recognize memory unit '$memory'")
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala b/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala
new file mode 100644
index 00000000..fe5ad569
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala
@@ -0,0 +1,94 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import akka.actor.{ActorLogging, Props, Actor, ActorRef}
+import kamon.trace.Incubator.{CheckForCompletedTraces, IncubatingTrace}
+import kamon.util.{NanoInterval, RelativeNanoTimestamp}
+import scala.annotation.tailrec
+import scala.collection.immutable.Queue
+import kamon.util.ConfigTools.Syntax
+
+// Actor that holds finished-but-still-active traces ("incubating") before
+// reporting them: each trace waits at least min-incubation-time, then stays
+// until its context says it no longer needs incubation or max-incubation-time
+// elapses. A periodic self-message drives both checks.
+class Incubator(subscriptions: ActorRef) extends Actor with ActorLogging {
+ import context.dispatcher
+ val config = context.system.settings.config.getConfig("kamon.trace.incubator")
+
+ val minIncubationTime = new NanoInterval(config.getFiniteDuration("min-incubation-time").toNanos)
+ val maxIncubationTime = new NanoInterval(config.getFiniteDuration("max-incubation-time").toNanos)
+ val checkInterval = config.getFiniteDuration("check-interval")
+
+ val checkSchedule = context.system.scheduler.schedule(checkInterval, checkInterval, self, CheckForCompletedTraces)
+ // FIFO of traces still inside the minimum incubation window.
+ var waitingForMinimumIncubation = Queue.empty[IncubatingTrace]
+ // Traces past the minimum window that still report shouldIncubate.
+ var waitingForIncubationFinish = List.empty[IncubatingTrace]
+
+ def receive = {
+ case tc: TracingContext ⇒ incubate(tc)
+ case CheckForCompletedTraces ⇒
+ checkWaitingForMinimumIncubation()
+ checkWaitingForIncubationFinish()
+ }
+
+ def incubate(tc: TracingContext): Unit =
+ waitingForMinimumIncubation = waitingForMinimumIncubation.enqueue(IncubatingTrace(tc, RelativeNanoTimestamp.now))
+
+ // Drains the head of the queue while its minimum incubation time has passed;
+ // relies on FIFO order so once the head is too young, the rest are too.
+ @tailrec private def checkWaitingForMinimumIncubation(): Unit = {
+ if (waitingForMinimumIncubation.nonEmpty) {
+ val it = waitingForMinimumIncubation.head
+ if (NanoInterval.since(it.incubationStart) >= minIncubationTime) {
+ waitingForMinimumIncubation = waitingForMinimumIncubation.tail
+
+ if (it.tc.shouldIncubate)
+ waitingForIncubationFinish = it :: waitingForIncubationFinish
+ else
+ dispatchTraceInfo(it.tc)
+
+ checkWaitingForMinimumIncubation()
+ }
+ }
+ }
+
+ // Dispatches traces that finished incubating; traces that exceed the maximum
+ // incubation time are dispatched as-is with a warning.
+ private def checkWaitingForIncubationFinish(): Unit = {
+ waitingForIncubationFinish = waitingForIncubationFinish.filter {
+ case IncubatingTrace(context, incubationStart) ⇒
+ if (!context.shouldIncubate) {
+ dispatchTraceInfo(context)
+ false
+ } else {
+ if (NanoInterval.since(incubationStart) >= maxIncubationTime) {
+ log.warning("Trace [{}] with token [{}] has reached the maximum incubation time, will be reported as is.", context.name, context.token)
+ dispatchTraceInfo(context);
+ false
+ } else true
+ }
+ }
+ }
+
+ def dispatchTraceInfo(tc: TracingContext): Unit = subscriptions ! tc.generateTraceInfo
+
+ override def postStop(): Unit = {
+ super.postStop()
+ checkSchedule.cancel()
+ }
+}
+
+object Incubator {
+
+ def props(subscriptions: ActorRef): Props = Props(new Incubator(subscriptions))
+
+ // Self-message triggering the periodic incubation checks.
+ case object CheckForCompletedTraces
+ case class IncubatingTrace(tc: TracingContext, incubationStart: RelativeNanoTimestamp)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala
new file mode 100644
index 00000000..bc0bedba
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala
@@ -0,0 +1,186 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.util.concurrent.ConcurrentLinkedQueue
+
+import akka.event.LoggingAdapter
+import kamon.Kamon
+import kamon.metric.{SegmentMetrics, TraceMetrics}
+import kamon.util.{NanoInterval, RelativeNanoTimestamp}
+
+import scala.annotation.tailrec
+import scala.collection.concurrent.TrieMap
+
+// TraceContext implementation that records only metrics (elapsed time and
+// error counts for the trace and its segments) without collecting detailed
+// trace data. Segment latencies are queued and drained into entity recorders
+// once the trace is closed.
+private[kamon] class MetricsOnlyContext(
+ traceName: String,
+ val token: String,
+ traceTags: Map[String, String],
+ currentStatus: Status,
+ val levelOfDetail: LevelOfDetail,
+ val startTimestamp: RelativeNanoTimestamp,
+ log: LoggingAdapter
+) extends TraceContext {
+
+ @volatile private var _name = traceName
+ @volatile private var _status = currentStatus
+ @volatile protected var _elapsedTime = NanoInterval.default
+
+ // Segments finished before the trace closes accumulate here; drained on finish.
+ private val _finishedSegments = new ConcurrentLinkedQueue[SegmentLatencyData]()
+ private val _traceLocalStorage = new TraceLocalStorage
+ private val _tags = TrieMap.empty[String, String] ++= traceTags
+
+ // Renaming is only allowed while the trace is still open.
+ def rename(newName: String): Unit =
+ if (Status.Open == status)
+ _name = newName
+ else
+ log.warning("Can't rename trace from [{}] to [{}] because the trace is already closed.", name, newName)
+
+ def name: String = _name
+ def tags: Map[String, String] = _tags.toMap
+ def isEmpty: Boolean = false
+ def status: Status = _status
+ // Metadata is a no-op in metrics-only mode.
+ def addMetadata(key: String, value: String): Unit = {}
+ def addTag(key: String, value: String): Unit = _tags.put(key, value)
+ def removeTag(key: String, value: String): Boolean = _tags.remove(key, value)
+
+ // Closes the trace: records elapsed time (and errors) against the trace's
+ // metric entity if tracking is enabled, then drains any pending segments.
+ private def finish(withError: Boolean): Unit = {
+ _status = if (withError) Status.FinishedWithError else Status.FinishedSuccessfully
+ val traceElapsedTime = NanoInterval.since(startTimestamp)
+ _elapsedTime = traceElapsedTime
+
+ if (Kamon.metrics.shouldTrack(name, TraceMetrics.category)) {
+ val traceEntity = Kamon.metrics.entity(TraceMetrics, name, _tags.toMap)
+ traceEntity.elapsedTime.record(traceElapsedTime.nanos)
+ if (withError) traceEntity.errors.increment()
+ }
+ drainFinishedSegments()
+ }
+
+ def finish(): Unit = finish(withError = false)
+
+ def finishWithError(cause: Throwable): Unit = {
+ //we should do something with the Throwable in a near future
+ finish(withError = true)
+ }
+
+ def startSegment(segmentName: String, category: String, library: String): Segment =
+ startSegment(segmentName, category, library, Map.empty[String, String])
+
+ def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment =
+ new MetricsOnlySegment(segmentName, category, library, tags)
+
+ // Pops queued segment latencies one by one and records them against their
+ // segment metric entities, tagged with trace/category/library defaults.
+ @tailrec private def drainFinishedSegments(): Unit = {
+ val segment = _finishedSegments.poll()
+ if (segment != null) {
+ val defaultTags = Map(
+ "trace" → name,
+ "category" → segment.category,
+ "library" → segment.library
+ )
+
+ if (Kamon.metrics.shouldTrack(segment.name, SegmentMetrics.category)) {
+ val segmentEntity = Kamon.metrics.entity(SegmentMetrics, segment.name, defaultTags ++ segment.tags)
+ segmentEntity.elapsedTime.record(segment.duration.nanos)
+ if (segment.isFinishedWithError) segmentEntity.errors.increment()
+ }
+ drainFinishedSegments()
+ }
+ }
+
+ // Enqueues a finished segment; if the trace is already closed, drains
+ // immediately so the segment is not left behind.
+ protected def finishSegment(
+ segmentName: String,
+ category: String,
+ library: String,
+ duration: NanoInterval,
+ segmentTags: Map[String, String],
+ isFinishedWithError: Boolean
+ ): Unit = {
+
+ _finishedSegments.add(SegmentLatencyData(segmentName, category, library, duration, segmentTags, isFinishedWithError))
+
+ if (isClosed) {
+ drainFinishedSegments()
+ }
+ }
+
+ // Should only be used by the TraceLocal utilities.
+ def traceLocalStorage: TraceLocalStorage = _traceLocalStorage
+
+ // Handle with care and make sure that the trace is closed before calling this method, otherwise NanoInterval.default
+ // will be returned.
+ def elapsedTime: NanoInterval = _elapsedTime
+
+ // Segment counterpart of MetricsOnlyContext: tracks its own start time, tags
+ // and status, and reports its latency to the enclosing trace on finish.
+ class MetricsOnlySegment(
+ segmentName: String,
+ val category: String,
+ val library: String,
+ segmentTags: Map[String, String]
+ ) extends Segment {
+
+ private val _startTimestamp = RelativeNanoTimestamp.now
+ private val _tags = TrieMap.empty[String, String] ++= segmentTags
+
+ @volatile private var _segmentName = segmentName
+ @volatile private var _elapsedTime = NanoInterval.default
+ @volatile private var _status: Status = Status.Open
+
+ def name: String = _segmentName
+ def tags: Map[String, String] = _tags.toMap
+ def isEmpty: Boolean = false
+ def status: Status = _status
+ // Metadata is a no-op in metrics-only mode.
+ def addMetadata(key: String, value: String): Unit = {}
+ def addTag(key: String, value: String): Unit = _tags.put(key, value)
+ def removeTag(key: String, value: String): Boolean = _tags.remove(key, value)
+
+ // Renaming is only allowed while the segment is still open.
+ def rename(newName: String): Unit =
+ if (Status.Open == status)
+ _segmentName = newName
+ else
+ log.warning("Can't rename segment from [{}] to [{}] because the segment is already closed.", name, newName)
+
+ // Closes the segment and hands its latency to the enclosing trace.
+ private def finish(withError: Boolean): Unit = {
+ _status = if (withError) Status.FinishedWithError else Status.FinishedSuccessfully
+ val segmentElapsedTime = NanoInterval.since(_startTimestamp)
+ _elapsedTime = segmentElapsedTime
+
+ finishSegment(name, category, library, segmentElapsedTime, _tags.toMap, withError)
+ }
+
+ def finishWithError(cause: Throwable): Unit = {
+ //we should do something with the Throwable in a near future
+ finish(withError = true)
+ }
+
+ def finish(): Unit = finish(withError = false)
+
+ // Handle with care and make sure that the segment is closed before calling this method, otherwise
+ // NanoInterval.default will be returned.
+ def elapsedTime: NanoInterval = _elapsedTime
+ def startTimestamp: RelativeNanoTimestamp = _startTimestamp
+
+ }
+}
+
+// Immutable record of a finished segment's latency and identifying metadata,
+// queued until the enclosing trace drains it into metric entities.
+case class SegmentLatencyData(
+ name: String,
+ category: String,
+ library: String,
+ duration: NanoInterval,
+ tags: Map[String, String],
+ isFinishedWithError: Boolean
+) \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala b/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala
new file mode 100644
index 00000000..234c67bb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala
@@ -0,0 +1,101 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.net.InetAddress
+import java.util.concurrent.atomic.AtomicLong
+import kamon.util.{NanoTimestamp, NanoInterval, Sequencer}
+import scala.concurrent.forkjoin.ThreadLocalRandom
+
+import scala.util.Try
+
+// Decides which traces get recorded (shouldTrace, checked at trace creation) and which
+// finished traces get reported to subscribers (shouldReport, checked at completion).
+trait Sampler {
+  def shouldTrace: Boolean
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean
+}
+
+// Used when tracing is effectively disabled (metrics-only mode): never trace, never report.
+object NoSampling extends Sampler {
+  def shouldTrace: Boolean = false
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = false
+}
+
+// Traces and reports every single trace, regardless of elapsed time.
+object SampleAll extends Sampler {
+  def shouldTrace: Boolean = true
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+/**
+ * Samples roughly `chance` percent of all traces.
+ *
+ * @param chance sampling probability in percent, must be in (0, 100].
+ */
+class RandomSampler(chance: Int) extends Sampler {
+  require(chance > 0, "kamon.trace.random-sampler.chance cannot be <= 0")
+  require(chance <= 100, "kamon.trace.random-sampler.chance cannot be > 100")
+
+  // nextInt(100) yields 0..99, so `< chance` selects exactly `chance` outcomes out of 100.
+  // The previous `<= chance` selected chance + 1 outcomes, oversampling by one percentage point.
+  def shouldTrace: Boolean = ThreadLocalRandom.current().nextInt(100) < chance
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+// Samples every `interval`-th trace. The interval must be a power of two so the modulo on
+// the global Sequencer counter can be computed with a cheap bit mask (see fastMod).
+class OrderedSampler(interval: Int) extends Sampler {
+  import OrderedSampler._
+
+  require(interval > 0, "kamon.trace.ordered-sampler.sample-interval cannot be <= 0")
+  assume(interval isPowerOfTwo, "kamon.trace.ordered-sampler.sample-interval must be power of two")
+
+  private val sequencer = Sequencer()
+
+  def shouldTrace: Boolean = (sequencer.next() fastMod interval) == 0
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+object OrderedSampler {
+  implicit class EnhancedInt(i: Int) {
+    // A strictly positive power of two has exactly one bit set. The `i > 0` guard keeps
+    // the bit trick from wrongly accepting 0 (and Int.MinValue), which it otherwise would.
+    def isPowerOfTwo = i > 0 && (i & (i - 1)) == 0
+  }
+
+  implicit class EnhancedLong(dividend: Long) {
+    // Valid only when divisor is a power of two: x mod 2^k == x & (2^k - 1).
+    def fastMod(divisor: Int) = dividend & (divisor - 1)
+  }
+}
+
+// Records every trace but only reports those whose total elapsed time reaches the
+// configured minimum threshold.
+class ThresholdSampler(thresholdInNanoseconds: NanoInterval) extends Sampler {
+  require(thresholdInNanoseconds.nanos > 0, "kamon.trace.threshold-sampler.minimum-elapsed-time cannot be <= 0")
+
+  def shouldTrace: Boolean = true
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = traceElapsedTime >= thresholdInNanoseconds
+}
+
+// Samples at most one trace per `pauseInNanoseconds` window. When several threads race
+// past the window boundary, the CAS guarantees only one of them wins and gets traced.
+class ClockSampler(pauseInNanoseconds: NanoInterval) extends Sampler {
+  require(pauseInNanoseconds.nanos > 0, "kamon.trace.clock-sampler.pause cannot be <= 0")
+
+  // Timestamp (nanos) of the last sampled trace; 0 means nothing has been sampled yet.
+  private val timer: AtomicLong = new AtomicLong(0L)
+
+  def shouldTrace: Boolean = {
+    val now = NanoTimestamp.now.nanos
+    val lastTimer = timer.get()
+    if ((lastTimer + pauseInNanoseconds.nanos) < now)
+      timer.compareAndSet(lastTimer, now) // true only for the thread whose CAS succeeds
+    else
+      false
+  }
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+// Generates trace tokens of the form "<hostname>-<sequence>", unique within this JVM.
+class DefaultTokenGenerator extends Function0[String] {
+  private val prefix = Try(InetAddress.getLocalHost.getHostName).getOrElse("unknown-localhost")
+  private val counter = new AtomicLong
+
+  def apply(): String =
+    s"$prefix-${counter.incrementAndGet()}"
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala
new file mode 100644
index 00000000..bbf40d8d
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala
@@ -0,0 +1,202 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.io.ObjectStreamException
+import java.util
+
+import kamon.trace.Status.Closed
+import kamon.trace.TraceContextAware.DefaultTraceContextAware
+import kamon.util.{Function, RelativeNanoTimestamp, SameThreadExecutionContext, Supplier}
+
+import scala.concurrent.Future
+
+/**
+ * A single trace under measurement. Carries a name, a token identifying the trace across
+ * processes, user tags and lifecycle status, and acts as a factory for Segments that
+ * measure sub-operations of the trace.
+ */
+trait TraceContext {
+  def name: String
+  def token: String
+  def tags: Map[String, String]
+  def isEmpty: Boolean
+  def nonEmpty: Boolean = !isEmpty
+  def isClosed: Boolean = !(Status.Open == status)
+  def status: Status
+  def finish(): Unit
+  def finishWithError(cause: Throwable): Unit
+  def rename(newName: String): Unit
+  def startSegment(segmentName: String, category: String, library: String): Segment
+  def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment
+  def addMetadata(key: String, value: String): Unit
+  def addTag(key: String, value: String): Unit
+  def removeTag(key: String, value: String): Boolean
+  def startTimestamp: RelativeNanoTimestamp
+
+  // Applies f only on real (non-empty) contexts so callers need not special-case
+  // EmptyTraceContext.
+  def collect[T](f: TraceContext ⇒ T): Option[T] =
+    if (nonEmpty)
+      Some(f(this))
+    else None
+
+  // Java-friendly variant of collect using kamon.util.Function.
+  def collect[T](f: Function[TraceContext, T]): Option[T] =
+    if (nonEmpty)
+      Some(f(this))
+    else None
+
+  // Runs `code` inside a new segment; the segment is finished even if `code` throws.
+  def withNewSegment[T](segmentName: String, category: String, library: String)(code: ⇒ T): T = {
+    withNewSegment(segmentName, category, library, Map.empty[String, String])(code)
+  }
+
+  def withNewSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ T): T = {
+    val segment = startSegment(segmentName, category, library, tags)
+    try code finally segment.finish()
+  }
+
+  // Async variants: the segment finishes when the returned Future completes (success or
+  // failure), not when the code block returns.
+  def withNewAsyncSegment[T](segmentName: String, category: String, library: String)(code: ⇒ Future[T]): Future[T] = {
+    withNewAsyncSegment(segmentName, category, library, Map.empty[String, String])(code)
+  }
+
+  def withNewAsyncSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ Future[T]): Future[T] = {
+    val segment = startSegment(segmentName, category, library, tags)
+    val result = code
+    result.onComplete(_ ⇒ segment.finish())(SameThreadExecutionContext)
+    result
+  }
+
+  // Java variant.
+  def withNewSegment[T](segmentName: String, category: String, library: String, code: Supplier[T]): T =
+    withNewSegment(segmentName, category, library)(code.get)
+
+  def withNewSegment[T](segmentName: String, category: String, library: String, tags: util.Map[String, String], code: Supplier[T]): T = {
+    import scala.collection.JavaConverters._
+    withNewSegment(segmentName, category, library, tags.asScala.toMap)(code.get)
+  }
+}
+
+/**
+ * A measured sub-operation of a trace (e.g. an HTTP client call or a database query).
+ * Lifecycle mirrors TraceContext: open on creation, closed via finish/finishWithError.
+ */
+trait Segment {
+  def name: String
+  def category: String
+  def library: String
+  def tags: Map[String, String]
+  def isEmpty: Boolean
+  def nonEmpty: Boolean = !isEmpty
+  def isClosed: Boolean = !(Status.Open == status)
+  def status: Status
+  def finish(): Unit
+  def finishWithError(cause: Throwable): Unit
+  def rename(newName: String): Unit
+  def addMetadata(key: String, value: String): Unit
+  def addTag(key: String, value: String): Unit
+  def removeTag(key: String, value: String): Boolean
+}
+
+/**
+ * No-op context used when no trace is active on the current thread (tracing disabled or
+ * trace not sampled). All mutators are no-ops and startSegment returns the equally inert
+ * EmptySegment.
+ */
+case object EmptyTraceContext extends TraceContext {
+  def name: String = "empty-trace"
+  def token: String = ""
+  def tags: Map[String, String] = Map.empty
+  def isEmpty: Boolean = true
+  def status: Status = Closed
+  def finish(): Unit = {}
+  def finishWithError(cause: Throwable): Unit = {}
+  def rename(name: String): Unit = {}
+  def startSegment(segmentName: String, category: String, library: String): Segment = EmptySegment
+  def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = EmptySegment
+  def addMetadata(key: String, value: String): Unit = {}
+  def startTimestamp = new RelativeNanoTimestamp(0L)
+  def addTag(key: String, value: String): Unit = {}
+  def removeTag(key: String, value: String): Boolean = false
+
+  // No-op counterpart of Segment, returned by the empty context's startSegment.
+  case object EmptySegment extends Segment {
+    val name: String = "empty-segment"
+    val category: String = "empty-category"
+    val library: String = "empty-library"
+    def tags: Map[String, String] = Map.empty
+    def isEmpty: Boolean = true
+    def status: Status = Closed
+    def finish(): Unit = {}
+    def finishWithError(cause: Throwable): Unit = {}
+    def rename(newName: String): Unit = {}
+    def addMetadata(key: String, value: String): Unit = {}
+    def addTag(key: String, value: String): Unit = {}
+    def removeTag(key: String, value: String): Boolean = false
+  }
+}
+
+// Well-known segment category names, shared by instrumentation modules for consistency.
+object SegmentCategory {
+  val HttpClient = "http-client"
+  val Database = "database"
+}
+
+// Numeric level-of-detail wrapper (a value class, so no allocation). NOTE(review): looks
+// superseded by the LevelOfDetail ADT below — confirm usage before removing.
+class LOD private[trace] (val level: Int) extends AnyVal
+object LOD {
+  val MetricsOnly = new LOD(1)
+  val SimpleTrace = new LOD(2)
+}
+
+// How much information is collected per trace: metrics only, a simple trace with
+// segments, or a full trace.
+sealed trait LevelOfDetail
+object LevelOfDetail {
+  case object MetricsOnly extends LevelOfDetail
+  case object SimpleTrace extends LevelOfDetail
+  case object FullTrace extends LevelOfDetail
+}
+
+// Lifecycle of a trace or segment. Closed is used by the empty context/segment; real
+// instances end in FinishedWithError or FinishedSuccessfully.
+sealed trait Status
+object Status {
+  case object Open extends Status
+  case object Closed extends Status
+  case object FinishedWithError extends Status
+  case object FinishedSuccessfully extends Status
+}
+
+// Mixed into messages/objects that must capture the TraceContext active at creation time.
+trait TraceContextAware extends Serializable {
+  def traceContext: TraceContext
+}
+
+object TraceContextAware {
+  def default: TraceContextAware = new DefaultTraceContextAware
+
+  class DefaultTraceContextAware extends TraceContextAware {
+    // Captures whatever context is active on the constructing thread; @transient because
+    // contexts are never serialized with the message.
+    @transient val traceContext = Tracer.currentContext
+
+    //
+    // Beware of this hack, it might bite us in the future!
+    //
+    // When using remoting/cluster all messages carry the TraceContext in the envelope in which they
+    // are sent but that doesn't apply to System Messages. We are certain that the TraceContext is
+    // available (if any) when the system messages are read and this will make sure that it is correctly
+    // captured and propagated.
+    @throws[ObjectStreamException]
+    private def readResolve: AnyRef = {
+      new DefaultTraceContextAware
+    }
+  }
+}
+
+// TraceContextAware that additionally captures a nanoTime timestamp at creation.
+trait TimestampedTraceContextAware extends TraceContextAware {
+  def captureNanoTime: Long
+}
+
+object TimestampedTraceContextAware {
+  // Captures both the current context and System.nanoTime() at instantiation.
+  def default: TimestampedTraceContextAware = new DefaultTraceContextAware with TimestampedTraceContextAware {
+    @transient val captureNanoTime = System.nanoTime()
+  }
+}
+
+// Mixed into objects that carry a Segment across asynchronous boundaries; defaults to the
+// inert EmptySegment until one is assigned.
+trait SegmentAware {
+  @volatile @transient var segment: Segment = EmptyTraceContext.EmptySegment
+}
+
+object SegmentAware {
+  def default: SegmentAware = new DefaultSegmentAware
+  // Combines context capture (DefaultTraceContextAware) with a mutable segment slot.
+  class DefaultSegmentAware extends DefaultTraceContextAware with SegmentAware {}
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala
new file mode 100644
index 00000000..460e4b22
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala
@@ -0,0 +1,64 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import kamon.trace.TraceLocal.TraceLocalKey
+import kamon.util.Supplier
+import scala.collection.concurrent.TrieMap
+
+/**
+ * Key/value storage scoped to the currently active TraceContext. Values live as long as
+ * the context does and are only reachable from threads that have the context installed.
+ */
+object TraceLocal {
+
+  // Marker trait for typed storage keys; T is the type of the stored value.
+  trait TraceLocalKey[T]
+
+  // Keys whose values should also be exposed to the logging MDC under mdcKey.
+  trait AvailableToMdc extends TraceLocalKey[String] {
+    def mdcKey: String
+  }
+
+  object AvailableToMdc {
+    case class DefaultKeyAvailableToMdc(mdcKey: String) extends AvailableToMdc
+
+    def fromKey(mdcKey: String): AvailableToMdc = DefaultKeyAvailableToMdc(mdcKey)
+    def apply(mdcKey: String): AvailableToMdc = fromKey(mdcKey)
+  }
+
+  // NOTE(review): `value` is typed Any rather than T, so the key's type parameter is not
+  // enforced on store — a mismatched value only surfaces as a cast failure on retrieve.
+  def store[T](key: TraceLocalKey[T])(value: Any): Unit = Tracer.currentContext match {
+    case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.store(key)(value)
+    case EmptyTraceContext ⇒ // Can't store in the empty context.
+  }
+
+  def retrieve[T](key: TraceLocalKey[T]): Option[T] = Tracer.currentContext match {
+    case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.retrieve(key)
+    case EmptyTraceContext ⇒ None // Can't retrieve anything from the empty context.
+  }
+
+  // Java variant
+  @throws(classOf[NoSuchElementException])
+  def get[T](key: TraceLocalKey[T]): T = retrieve(key).get
+
+  def getOrElse[T](key: TraceLocalKey[T], code: Supplier[T]): T = retrieve(key).getOrElse(code.get)
+
+  def storeForMdc(key: String, value: String): Unit = store(AvailableToMdc.fromKey(key))(value)
+
+  // Fresh, identity-based key: two calls yield two distinct keys.
+  def newTraceLocalKey[T]: TraceLocalKey[T] = new TraceLocalKey[T] {}
+}
+
+// Per-context backing store. The cast in retrieve is safe only as long as values are
+// written through the typed store(key) front-end with a matching value type.
+class TraceLocalStorage {
+  val underlyingStorage = TrieMap[TraceLocalKey[_], Any]()
+
+  def store[T](key: TraceLocalKey[T])(value: Any): Unit = underlyingStorage.put(key, value)
+  def retrieve[T](key: TraceLocalKey[T]): Option[T] = underlyingStorage.get(key).map(_.asInstanceOf[T])
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala
new file mode 100644
index 00000000..c3a83e93
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala
@@ -0,0 +1,51 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import kamon.util.ConfigTools.Syntax
+import com.typesafe.config.Config
+import kamon.util.NanoInterval
+
+// Immutable snapshot of the tracer configuration derived from the kamon.trace section.
+case class TraceSettings(levelOfDetail: LevelOfDetail, sampler: Sampler, tokenGenerator: () ⇒ String)
+
+object TraceSettings {
+  /**
+   * Reads the kamon.trace.* settings: level of detail, sampler selection and the trace
+   * token generator (instantiated reflectively from its fully-qualified class name).
+   * Fails fast with a descriptive error on any unrecognized configuration value.
+   */
+  def apply(config: Config): TraceSettings = {
+    val tracerConfig = config.getConfig("kamon.trace")
+
+    val detailLevel: LevelOfDetail = tracerConfig.getString("level-of-detail") match {
+      case "metrics-only" ⇒ LevelOfDetail.MetricsOnly
+      case "simple-trace" ⇒ LevelOfDetail.SimpleTrace
+      case other ⇒ sys.error(s"Unknown tracer level of detail [$other] present in the configuration file.")
+    }
+
+    val sampler: Sampler =
+      if (detailLevel == LevelOfDetail.MetricsOnly) NoSampling
+      else tracerConfig.getString("sampling") match {
+        case "all" ⇒ SampleAll
+        case "random" ⇒ new RandomSampler(tracerConfig.getInt("random-sampler.chance"))
+        case "ordered" ⇒ new OrderedSampler(tracerConfig.getInt("ordered-sampler.sample-interval"))
+        case "threshold" ⇒ new ThresholdSampler(new NanoInterval(tracerConfig.getFiniteDuration("threshold-sampler.minimum-elapsed-time").toNanos))
+        case "clock" ⇒ new ClockSampler(new NanoInterval(tracerConfig.getFiniteDuration("clock-sampler.pause").toNanos))
+        // Previously an unknown value blew up with an opaque MatchError; fail with a
+        // descriptive message, mirroring the level-of-detail handling above.
+        case other ⇒ sys.error(s"Unknown sampling strategy [$other] present in the configuration file.")
+      }
+
+    val dynamic = new akka.actor.ReflectiveDynamicAccess(getClass.getClassLoader)
+    val tokenGeneratorFQN = tracerConfig.getString("token-generator")
+    val tokenGenerator = dynamic.createInstanceFor[() ⇒ String](tokenGeneratorFQN, Nil).get // let's bubble up any problems.
+
+    TraceSettings(detailLevel, sampler, tokenGenerator)
+  }
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala
new file mode 100644
index 00000000..47455ec5
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala
@@ -0,0 +1,45 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import akka.actor.{Terminated, ActorRef, Actor}
+
+/**
+ * Keeps the set of actors interested in TraceInfo messages and fans every received trace
+ * out to them. Subscribers are death-watched so they are dropped automatically when they
+ * terminate.
+ */
+class TraceSubscriptions extends Actor {
+  import TraceSubscriptions._
+
+  var subscribers: List[ActorRef] = Nil
+
+  def receive = {
+    case Subscribe(newSubscriber) ⇒
+      if (!subscribers.contains(newSubscriber))
+        subscribers = context.watch(newSubscriber) :: subscribers
+
+    case Unsubscribe(leavingSubscriber) ⇒
+      // filterNot removes the leaving subscriber; the previous `filter(_ == ...)` kept
+      // ONLY the leaving subscriber and silently dropped everybody else.
+      subscribers = subscribers.filterNot(_ == leavingSubscriber)
+
+    case Terminated(terminatedSubscriber) ⇒
+      // Same fix as above: drop the dead subscriber, keep the rest.
+      subscribers = subscribers.filterNot(_ == terminatedSubscriber)
+
+    case trace: TraceInfo ⇒
+      subscribers.foreach(_ ! trace)
+  }
+}
+
+object TraceSubscriptions {
+  // Protocol messages accepted by the TraceSubscriptions actor.
+  case class Subscribe(subscriber: ActorRef)
+  case class Unsubscribe(subscriber: ActorRef)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala
new file mode 100644
index 00000000..552962eb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala
@@ -0,0 +1,197 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.util
+
+import akka.actor._
+import akka.event.{Logging, LoggingAdapter}
+import com.typesafe.config.Config
+import kamon.Kamon
+import kamon.metric.MetricsModule
+import kamon.util._
+
+import scala.collection.JavaConverters._
+
+/**
+ * Entry point for creating TraceContexts and for (un)subscribing to finished-trace
+ * (TraceInfo) notifications.
+ */
+trait TracerModule {
+  def newContext(name: String): TraceContext
+  def newContext(name: String, token: Option[String]): TraceContext
+  def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext
+  def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, state: Status, isLocal: Boolean): TraceContext
+
+  def subscribe(subscriber: ActorRef): Unit
+  def unsubscribe(subscriber: ActorRef): Unit
+}
+
+/**
+ * Thread-local holder of the currently active TraceContext, plus combinators for running
+ * code within an existing or freshly created context.
+ */
+object Tracer {
+  // Defaults to the empty context so reads never observe null.
+  private[kamon] val _traceContextStorage = new ThreadLocal[TraceContext] {
+    override def initialValue(): TraceContext = EmptyTraceContext
+  }
+
+  def currentContext: TraceContext =
+    _traceContextStorage.get()
+
+  def setCurrentContext(context: TraceContext): Unit =
+    _traceContextStorage.set(context)
+
+  def clearCurrentContext: Unit =
+    _traceContextStorage.remove()
+
+  // Installs `context` for the duration of `code`, always restoring the previous context,
+  // even when `code` throws.
+  def withContext[T](context: TraceContext)(code: ⇒ T): T = {
+    val oldContext = _traceContextStorage.get()
+    _traceContextStorage.set(context)
+
+    try code finally _traceContextStorage.set(oldContext)
+  }
+
+  // Java variant.
+  def withContext[T](context: TraceContext, code: Supplier[T]): T =
+    withContext(context)(code.get)
+
+  // Creates a new context via Kamon.tracer and runs `code` in it; when autoFinish is true
+  // the context is finished as soon as `code` returns.
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T = {
+    withContext(Kamon.tracer.newContext(traceName, traceToken, tags)) {
+      val codeResult = code
+      if (autoFinish)
+        currentContext.finish()
+
+      codeResult
+    }
+  }
+
+  // Convenience overloads, all delegating to the full variant above.
+  def withNewContext[T](traceName: String)(code: ⇒ T): T =
+    withNewContext(traceName, None)(code)
+
+  def withNewContext[T](traceName: String, tags: Map[String, String])(code: ⇒ T): T =
+    withNewContext(traceName, None, tags)(code)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String])(code: ⇒ T): T =
+    withNewContext(traceName, traceToken, Map.empty[String, String])(code)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String])(code: ⇒ T): T =
+    withNewContext(traceName, traceToken, tags, autoFinish = false)(code)
+
+  def withNewContext[T](traceName: String, autoFinish: Boolean)(code: ⇒ T): T =
+    withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code)
+
+  def withNewContext[T](traceName: String, tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T =
+    withNewContext(traceName, None, tags, autoFinish)(code)
+
+  // Java variants.
+  def withNewContext[T](traceName: String, traceToken: Option[String], autoFinish: Boolean, code: Supplier[T]): T =
+    withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish)(code.get)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T =
+    withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish)(code.get)
+
+  def withNewContext[T](traceName: String, code: Supplier[T]): T =
+    withNewContext(traceName, None, Map.empty[String, String], autoFinish = false)(code.get)
+
+  def withNewContext[T](traceName: String, tags: util.Map[String, String], code: Supplier[T]): T =
+    withNewContext(traceName, None, tags.asScala.toMap, autoFinish = false)(code.get)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], code: Supplier[T]): T =
+    withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish = false)(code.get)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], code: Supplier[T]): T =
+    withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish = false)(code.get)
+
+  def withNewContext[T](traceName: String, autoFinish: Boolean, code: Supplier[T]): T =
+    withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code.get)
+
+  def withNewContext[T](traceName: String, tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T =
+    withNewContext(traceName, None, tags.asScala.toMap, autoFinish)(code.get)
+}
+
+/**
+ * Default TracerModule implementation. Decides, per trace, whether to build a cheap
+ * MetricsOnlyContext or a full TracingContext, and routes finished traces to the
+ * subscriptions/incubator actors.
+ */
+private[kamon] class TracerModuleImpl(metricsExtension: MetricsModule, config: Config) extends TracerModule {
+  // Volatile: settings are swapped atomically on start(system, newConfig).
+  @volatile private var _settings = TraceSettings(config)
+
+  // Created during start(); LazyActorRef holds messages sent before the refs exist.
+  private val _subscriptions = new LazyActorRef
+  private val _incubator = new LazyActorRef
+
+  private def newToken: String = _settings.tokenGenerator()
+
+  def newContext(name: String): TraceContext =
+    createTraceContext(name, None)
+
+  def newContext(name: String, token: Option[String]): TraceContext =
+    createTraceContext(name, token)
+
+  def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext =
+    createTraceContext(name, token, tags)
+
+  def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, status: Status, isLocal: Boolean): TraceContext =
+    createTraceContext(name, token, tags, timestamp, status, isLocal)
+
+  private def createTraceContext(traceName: String, token: Option[String], tags: Map[String, String] = Map.empty, startTimestamp: RelativeNanoTimestamp = RelativeNanoTimestamp.now,
+    status: Status = Status.Open, isLocal: Boolean = true): TraceContext = {
+
+    def newMetricsOnlyContext(token: String): TraceContext =
+      new MetricsOnlyContext(traceName, token, tags, status, _settings.levelOfDetail, startTimestamp, _logger)
+
+    val traceToken = token.getOrElse(newToken)
+
+    // Full tracing only for local traces that pass the sampler; anything else gets the
+    // cheaper metrics-only context.
+    _settings.levelOfDetail match {
+      case LevelOfDetail.MetricsOnly ⇒
+        newMetricsOnlyContext(traceToken)
+      case _ if !isLocal || !_settings.sampler.shouldTrace ⇒
+        newMetricsOnlyContext(traceToken)
+      case _ ⇒
+        new TracingContext(traceName, traceToken, tags, currentStatus = Status.Open, _settings.levelOfDetail, isLocal, startTimestamp, _logger, dispatchTracingContext)
+    }
+  }
+
+  def subscribe(subscriber: ActorRef): Unit =
+    _subscriptions.tell(TraceSubscriptions.Subscribe(subscriber))
+
+  def unsubscribe(subscriber: ActorRef): Unit =
+    _subscriptions.tell(TraceSubscriptions.Unsubscribe(subscriber))
+
+  // Finished traces that still have open segments are incubated first, giving segments a
+  // chance to close before the TraceInfo is generated and published.
+  private[kamon] def dispatchTracingContext(trace: TracingContext): Unit =
+    if (_settings.sampler.shouldReport(trace.elapsedTime))
+      if (trace.shouldIncubate)
+        _incubator.tell(trace)
+      else
+        _subscriptions.tell(trace.generateTraceInfo)
+
+  /**
+   * Tracer Extension initialization. NOTE(review): _logger stays null until start() runs,
+   * so contexts created before initialization receive a null LoggingAdapter — confirm
+   * start() always precedes the first newContext call.
+   */
+  private var _system: ActorSystem = null
+  private var _logger: LoggingAdapter = null
+  // lazy val: the actors are created at most once, on the first start() invocation.
+  private lazy val _start = {
+    val subscriptions = _system.actorOf(Props[TraceSubscriptions], "trace-subscriptions")
+    _subscriptions.point(subscriptions)
+    _incubator.point(_system.actorOf(Incubator.props(subscriptions)))
+  }
+
+  def start(system: ActorSystem, newConfig: Config): Unit = synchronized {
+    _settings = TraceSettings(newConfig)
+    _system = system
+    _logger = Logging(_system, "TracerModule")
+    _start
+    _system = null // release the ActorSystem reference once the actors exist
+  }
+}
+
+private[kamon] object TracerModuleImpl {
+
+  // Factory used by Kamon's module wiring.
+  def apply(metricsExtension: MetricsModule, config: Config) =
+    new TracerModuleImpl(metricsExtension, config)
+}
+
+// Immutable description of a finished trace, delivered to subscribers.
+case class TraceInfo(name: String, token: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], segments: List[SegmentInfo], status: Status)
+// Immutable description of a finished segment within a TraceInfo.
+case class SegmentInfo(name: String, category: String, library: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], status: Status) \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala
new file mode 100644
index 00000000..ff128f85
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala
@@ -0,0 +1,113 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.util.concurrent.ConcurrentLinkedQueue
+import java.util.concurrent.atomic.AtomicInteger
+
+import akka.event.LoggingAdapter
+import kamon.util.{NanoInterval, NanoTimestamp, RelativeNanoTimestamp}
+
+import scala.collection.concurrent.TrieMap
+
+/**
+ * TraceContext that, besides recording metrics (via MetricsOnlyContext), retains segments,
+ * metadata and timestamps so a full TraceInfo can be built and pushed to `traceInfoSink`
+ * once the trace finishes.
+ */
+private[trace] class TracingContext(
+  traceName: String,
+  token: String,
+  traceTags: Map[String, String],
+  currentStatus: Status,
+  levelOfDetail: LevelOfDetail,
+  isLocal: Boolean,
+  relativeStartTimestamp: RelativeNanoTimestamp, // renamed from misspelled `startTimeztamp`; visible caller passes it positionally
+  log: LoggingAdapter,
+  traceInfoSink: TracingContext ⇒ Unit
+) extends MetricsOnlyContext(traceName, token, traceTags, currentStatus, levelOfDetail, relativeStartTimestamp, log) {
+
+  // Segments started but not yet finished; drives the incubation decision below.
+  private val _openSegments = new AtomicInteger(0)
+  // Epoch-based timestamp captured once per trace (see createSegmentInfo for why).
+  private val _startTimestamp = NanoTimestamp.now
+  private val _allSegments = new ConcurrentLinkedQueue[TracingSegment]()
+  private val _metadata = TrieMap.empty[String, String]
+
+  override def addMetadata(key: String, value: String): Unit = _metadata.put(key, value)
+
+  override def startSegment(segmentName: String, category: String, library: String): Segment = {
+    startSegment(segmentName, category, library, Map.empty[String, String])
+  }
+
+  override def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = {
+    _openSegments.incrementAndGet()
+    val newSegment = new TracingSegment(segmentName, category, library, tags)
+    _allSegments.add(newSegment)
+    newSegment
+  }
+
+  override def finish(): Unit = {
+    super.finish()
+    traceInfoSink(this)
+  }
+
+  override def finishWithError(cause: Throwable): Unit = {
+    super.finishWithError(cause)
+    traceInfoSink(this)
+  }
+
+  override def finishSegment(segmentName: String, category: String, library: String, duration: NanoInterval, tags: Map[String, String], isFinishedWithError: Boolean = false): Unit = {
+    _openSegments.decrementAndGet()
+    super.finishSegment(segmentName, category, library, duration, tags, isFinishedWithError)
+  }
+
+  // A trace should incubate while it is still open or has open segments, giving the
+  // segments a chance to close before the TraceInfo is generated.
+  def shouldIncubate: Boolean = (Status.Open == status) || _openSegments.get() > 0
+
+  // Handle with care, should only be used after a trace is finished.
+  def generateTraceInfo: TraceInfo = {
+    require(isClosed, "Can't generated a TraceInfo if the Trace has not closed yet.")
+
+    val currentSegments = _allSegments.iterator()
+    val segmentsInfo = List.newBuilder[SegmentInfo] // was a needless `var`; the builder is mutated, never reassigned
+
+    while (currentSegments.hasNext) {
+      val segment = currentSegments.next()
+      if (segment.isClosed)
+        segmentsInfo += segment.createSegmentInfo(_startTimestamp, startTimestamp)
+      else
+        log.warning("Segment [{}] will be left out of TraceInfo because it was still open.", segment.name)
+    }
+
+    TraceInfo(name, token, _startTimestamp, elapsedTime, _metadata.toMap, tags, segmentsInfo.result(), status)
+  }
+
+  class TracingSegment(
+    segmentName: String,
+    category: String,
+    library: String,
+    segmentTags: Map[String, String]
+  ) extends MetricsOnlySegment(segmentName, category, library, segmentTags) {
+
+    private val metadata = TrieMap.empty[String, String]
+    override def addMetadata(key: String, value: String): Unit = metadata.put(key, value)
+
+    // Handle with care, should only be used after the segment has finished.
+    def createSegmentInfo(traceStartTimestamp: NanoTimestamp, traceRelativeTimestamp: RelativeNanoTimestamp): SegmentInfo = {
+      require(isClosed, "Can't generated a SegmentInfo if the Segment has not closed yet.")
+
+      // We don't have a epoch-based timestamp for the segments because calling System.currentTimeMillis() is both
+      // expensive and inaccurate, but we can do that once for the trace and calculate all the segments relative to it.
+      val segmentStartTimestamp = new NanoTimestamp((this.startTimestamp.nanos - traceRelativeTimestamp.nanos) + traceStartTimestamp.nanos)
+
+      SegmentInfo(this.name, category, library, segmentStartTimestamp, this.elapsedTime, metadata.toMap, tags, status)
+    }
+  }
+} \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala b/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala
new file mode 100644
index 00000000..8177ed14
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala
@@ -0,0 +1,26 @@
+/* ===================================================
+ * Copyright © 2013 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================================================== */
+package kamon.trace.logging
+
+import ch.qos.logback.classic.pattern.ClassicConverter
+import ch.qos.logback.classic.spi.ILoggingEvent
+import kamon.trace.Tracer
+
+/**
+ * Logback pattern converter that renders the token of the trace context currently active
+ * on the calling thread, or the literal "undefined" when no context is active.
+ */
+class LogbackTraceTokenConverter extends ClassicConverter {
+
+ // The logging event itself is ignored; the token is read from Tracer's current
+ // (thread-bound) context at conversion time.
+ def convert(event: ILoggingEvent): String =
+ Tracer.currentContext.collect(_.token).getOrElse("undefined")
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala b/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala
new file mode 100644
index 00000000..556366b0
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala
@@ -0,0 +1,54 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace.logging
+
+import kamon.trace.TraceLocal.AvailableToMdc
+import kamon.trace.{EmptyTraceContext, MetricsOnlyContext, TraceContext, Tracer}
+import kamon.util.Supplier
+import org.slf4j.MDC
+
+/**
+ * Mixin that copies trace information (trace token, trace name, and any TraceLocal values
+ * marked AvailableToMdc) into the SLF4J MDC for the duration of a code block.
+ */
+trait MdcKeysSupport {
+
+ // MDC keys under which the current trace token and trace name are published.
+ val traceTokenKey = "traceToken"
+ val traceNameKey = "traceName"
+
+ private val defaultKeys = Seq(traceTokenKey, traceNameKey)
+
+ // Runs `thunk` with the current trace context's data in the MDC; every key that was
+ // added is removed afterwards, even when the thunk throws.
+ def withMdc[A](thunk: ⇒ A): A = {
+ val keys = copyToMdc(Tracer.currentContext)
+ try thunk finally keys.foreach(key ⇒ MDC.remove(key))
+ }
+
+ // Java variant.
+ def withMdc[A](thunk: Supplier[A]): A = withMdc(thunk.get)
+
+ // Copies trace data into the MDC and returns the keys that were added (for later removal).
+ // NOTE(review): this match only covers MetricsOnlyContext and EmptyTraceContext; any other
+ // TraceContext implementation would cause a MatchError — confirm these are the only
+ // implementations in use.
+ private[kamon] def copyToMdc(traceContext: TraceContext): Iterable[String] = traceContext match {
+ case ctx: MetricsOnlyContext ⇒
+
+ // Add the default key value pairs for the trace token and trace name.
+ MDC.put(traceTokenKey, ctx.token)
+ MDC.put(traceNameKey, ctx.name)
+
+ defaultKeys ++ ctx.traceLocalStorage.underlyingStorage.collect {
+ case (available: AvailableToMdc, value) ⇒ Map(available.mdcKey → String.valueOf(value))
+ }.flatMap { value ⇒ value.map { case (k, v) ⇒ MDC.put(k, v); k } }
+
+ case EmptyTraceContext ⇒ Iterable.empty[String]
+ }
+}
+
+object MdcKeysSupport extends MdcKeysSupport \ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala b/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala
new file mode 100644
index 00000000..cbf530e4
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala
@@ -0,0 +1,48 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import java.util.concurrent.TimeUnit
+
+import com.typesafe.config.Config
+
+import scala.concurrent.duration.FiniteDuration
+
+import kamon.metric.instrument.{Memory, Time}
+
+object ConfigTools {
+ /** Enrichment for Typesafe Config instances with Kamon-specific reading helpers. */
+ implicit class Syntax(val config: Config) extends AnyVal {
+ // We are using the deprecated .getNanoseconds option to keep Kamon source code compatible with
+ // versions of Akka using older typesafe-config versions.
+
+ /** Reads the duration at `path` as a FiniteDuration with nanosecond precision. */
+ def getFiniteDuration(path: String): FiniteDuration =
+ FiniteDuration(config.getDuration(path, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS)
+
+ /** Returns the distinct first-level key names present in this config. */
+ def firstLevelKeys: Set[String] = {
+ import scala.collection.JavaConverters._
+
+ // Explicit .toSet call instead of the discouraged postfix-operator syntax (`} toSet`),
+ // which requires scala.language.postfixOps and is fragile under semicolon inference.
+ config.entrySet().asScala.map(entry ⇒ entry.getKey.takeWhile(_ != '.')).toSet
+ }
+
+ /** Parses the string at `path` as a Time unit (e.g. "ms", "s"). */
+ def time(path: String): Time = Time(config.getString(path))
+
+ /** Parses the string at `path` as a Memory unit (e.g. "b", "kb"). */
+ def memory(path: String): Memory = Memory(config.getString(path))
+ }
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala b/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala
new file mode 100644
index 00000000..7a94aefc
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala
@@ -0,0 +1,38 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import akka.actor.ActorRef
+
+import scala.concurrent.{ExecutionContext, Future}
+
+/**
+ * Extension for Future[ActorRef]. Try to dispatch a message to a Future[ActorRef] in the same thread if it has already
+ * completed or do the regular scheduling otherwise. Specially useful when using the ModuleSupervisor extension to
+ * create actors.
+ */
+object FastDispatch {
+ implicit class Syntax(val target: Future[ActorRef]) extends AnyVal {
+
+ // If the Future already completed, send on the calling thread; otherwise schedule the
+ // send for when it completes.
+ // NOTE(review): when the Future completed with a Failure, `target.value.get` is a
+ // Failure and `.map` silently drops the message — confirm this best-effort behavior
+ // is intended.
+ def fastDispatch(message: Any)(implicit ec: ExecutionContext): Unit =
+ if (target.isCompleted)
+ target.value.get.map(_ ! message)
+ else
+ target.map(_ ! message)
+ }
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala b/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala
new file mode 100644
index 00000000..8dab6519
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala
@@ -0,0 +1,25 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/** SAM-style interface for Java callers: produces a value of type T on demand. */
+trait Supplier[T] {
+ def get: T
+}
+
+/** SAM-style interface for Java callers: a function from T to R. */
+trait Function[T, R] {
+ def apply(t: T): R
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala b/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala
new file mode 100644
index 00000000..90bece5c
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala
@@ -0,0 +1,14 @@
+package kamon.util
+
+object JavaTags {
+
+ /**
+ * Helper method to transform Java maps into Scala maps. Typically this will be used as a static import from
+ * Java code that wants to use tags, as tags are defined as scala.collection.mutable.Map[String, String] and
+ * creating them directly from Java is quite verbose.
+ */
+ def tagsFromMap(tags: java.util.Map[String, String]): Map[String, String] = {
+ // JavaConverters with an explicit .asScala replaces the deprecated implicit
+ // JavaConversions wrappers (deprecated since 2.12, removed in 2.13).
+ import scala.collection.JavaConverters._
+ tags.asScala.toMap
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala b/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala
new file mode 100644
index 00000000..52e044f8
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala
@@ -0,0 +1,29 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import kamon.metric.instrument.Histogram
+
+object Latency {
+ /**
+ * Runs `thunk` and records the elapsed nanoseconds into `histogram`. The latency is
+ * recorded even when the thunk throws; the exception then propagates to the caller.
+ */
+ def measure[A](histogram: Histogram)(thunk: ⇒ A): A = {
+ val start = RelativeNanoTimestamp.now
+ try thunk finally {
+ val latency = NanoInterval.since(start).nanos
+ histogram.record(latency)
+ }
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala b/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala
new file mode 100644
index 00000000..a07abea6
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala
@@ -0,0 +1,69 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import java.util
+import java.util.concurrent.ConcurrentLinkedQueue
+
+import akka.actor.{Actor, ActorRef}
+import org.HdrHistogram.WriterReaderPhaser
+
+import scala.annotation.tailrec
+
+/**
+ * A LazyActorRef accumulates messages sent to an actor that doesn't exist yet. Once the actor is created and
+ * the LazyActorRef is pointed to it, all the accumulated messages are flushed and any new message sent to the
+ * LazyActorRef will immediately be sent to the pointed ActorRef.
+ *
+ * This is intended to be used during Kamon's initialization where some components need to use ActorRefs to work
+ * (like subscriptions and the trace incubator) but our internal ActorSystem is not yet ready to create the
+ * required actors.
+ */
+class LazyActorRef {
+ // Phaser coordinating concurrent writers (tell) against the one-time reader (point);
+ // see HdrHistogram's WriterReaderPhaser for the protocol.
+ private val _refPhaser = new WriterReaderPhaser
+ // Messages (with their senders) accumulated before the target actor exists.
+ private val _backlog = new ConcurrentLinkedQueue[(Any, ActorRef)]()
+ @volatile private var _target: Option[ActorRef] = None
+
+ // Sends `message` to the pointed actor, or queues it until point(...) is called.
+ def tell(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = {
+ val criticalEnter = _refPhaser.writerCriticalSectionEnter()
+ try {
+ _target.map(_.tell(message, sender)) getOrElse {
+ _backlog.add((message, sender))
+ }
+
+ } finally { _refPhaser.writerCriticalSectionExit(criticalEnter) }
+ }
+
+ // Points this LazyActorRef at `target` and flushes the backlog to it, in arrival order.
+ // May only be called once; a second call fails via sys.error (RuntimeException).
+ def point(target: ActorRef): Unit = {
+ @tailrec def drain(q: util.Queue[(Any, ActorRef)]): Unit = if (!q.isEmpty) {
+ val (msg, sender) = q.poll()
+ target.tell(msg, sender)
+ drain(q)
+ }
+
+ try {
+ _refPhaser.readerLock()
+
+ if (_target.isEmpty) {
+ _target = Some(target)
+ // flipPhase waits until no writer is still inside its critical section, so the
+ // backlog drain below cannot race with an in-flight tell.
+ _refPhaser.flipPhase(1000L)
+ drain(_backlog)
+
+ } else sys.error("A LazyActorRef cannot be pointed more than once.")
+ } finally { _refPhaser.readerUnlock() }
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala b/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala
new file mode 100644
index 00000000..6fc6fb15
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala
@@ -0,0 +1,43 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+object MapMerge {
+
+ /**
+ * Merge two immutable maps with the same key and value types, using the provided valueMerge function.
+ */
+ implicit class Syntax[K, V](val map: Map[K, V]) extends AnyVal {
+ def merge(that: Map[K, V], valueMerge: (V, V) ⇒ V): Map[K, V] = {
+ // Start from the left map, merging values for keys present on both sides.
+ val mergedLeft = map.map {
+ case (key, leftValue) ⇒
+ val finalValue = that.get(key) match {
+ case Some(rightValue) ⇒ valueMerge(leftValue, rightValue)
+ case None ⇒ leftValue
+ }
+ key → finalValue
+ }
+
+ // Fold in every entry that only exists on the right side.
+ that.foldLeft(mergedLeft) {
+ case (acc, (key, value)) ⇒ if (map.contains(key)) acc else acc + (key → value)
+ }
+ }
+ }
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala b/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala
new file mode 100644
index 00000000..1397050f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala
@@ -0,0 +1,37 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import com.typesafe.config.Config
+import kamon.metric.instrument.{Memory, Time}
+import kamon.util.ConfigTools._
+
+object NeedToScale {
+ // Config keys that request scaling of time/memory values in metric snapshots.
+ val TimeUnits = "time-units"
+ val MemoryUnits = "memory-units"
+
+ // Extractor: matches a Config that requests any scaling, yielding the optional target
+ // Time and/or Memory units; does not match when neither key is present.
+ def unapply(config: Config): Option[(Option[Time], Option[Memory])] = {
+ val scaleTimeTo: Option[Time] =
+ if (config.hasPath(TimeUnits)) Some(config.time(TimeUnits)) else None
+
+ val scaleMemoryTo: Option[Memory] =
+ if (config.hasPath(MemoryUnits)) Some(config.memory(MemoryUnits)) else None
+ if (scaleTimeTo.isDefined || scaleMemoryTo.isDefined) Some(scaleTimeTo → scaleMemoryTo)
+ else None
+ }
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala b/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala
new file mode 100644
index 00000000..9019eb4c
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala
@@ -0,0 +1,25 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import java.util.concurrent.atomic.AtomicLong
+
+/**
+ * AtomicLong with extra fields intended to pad the counter, reducing false sharing with
+ * neighboring data.
+ * NOTE(review): field-based padding is JVM-layout dependent and may be ineffective on
+ * some JVMs — confirm this still helps on the targeted runtimes.
+ */
+class PaddedAtomicLong(value: Long = 0) extends AtomicLong(value) {
+ @volatile var p1, p2, p3, p4, p5, p6 = 7L
+
+ // References the padding fields so they cannot be eliminated as dead code.
+ protected def sumPaddingToPreventOptimisation() = p1 + p2 + p3 + p4 + p5 + p6
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala b/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala
new file mode 100644
index 00000000..50e14ace
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala
@@ -0,0 +1,5 @@
+package kamon.util
+
+/** A predicate over path-like strings; returns true when `path` should be accepted. */
+trait PathFilter {
+ def accept(path: String): Boolean
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala b/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala
new file mode 100644
index 00000000..848fca87
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala
@@ -0,0 +1,27 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/**
+ * PathFilter that accepts a candidate only when the entire string matches the regular
+ * expression given at construction time.
+ */
+case class RegexPathFilter(path: String) extends PathFilter {
+ private val pathRegex = path.r
+
+ // Regex.unapplySeq succeeds only on a full match, exactly like `case pathRegex(_*)`.
+ override def accept(path: String): Boolean =
+ pathRegex.unapplySeq(path).isDefined
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala b/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala
new file mode 100644
index 00000000..5fb0d066
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala
@@ -0,0 +1,30 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import kamon.util.logger.LazyLogger
+import scala.concurrent.ExecutionContext
+
+/**
+ * For small code blocks that don't need to be run on a separate thread.
+ */
+object SameThreadExecutionContext extends ExecutionContext {
+ val logger = LazyLogger("SameThreadExecutionContext")
+
+ // Runs the task synchronously on the caller's thread: no scheduling, no hand-off.
+ override def execute(runnable: Runnable): Unit = runnable.run
+ // Failures are logged rather than rethrown, matching the ExecutionContext contract.
+ override def reportFailure(t: Throwable): Unit = logger.error(t.getMessage, t)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala b/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala
new file mode 100644
index 00000000..f368e54f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala
@@ -0,0 +1,40 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/**
+ * This class implements an extremely efficient, thread-safe way to generate an
+ * incrementing sequence of Longs with a simple Long overflow protection.
+ */
+class Sequencer {
+ // Threshold after which the counter is wrapped back to avoid Long overflow.
+ private val CloseToOverflow = Long.MaxValue - 1000000000
+ private val sequenceNumber = new PaddedAtomicLong(1L)
+
+ // Returns the next value in the sequence.
+ def next(): Long = {
+ val current = sequenceNumber.getAndIncrement
+
+ // check if this value is getting close to overflow?
+ if (current > CloseToOverflow) {
+ // NOTE(review): concurrent calls past the threshold may each call set(), so values
+ // can briefly repeat around the wrap point — confirm this is acceptable.
+ sequenceNumber.set(current - CloseToOverflow) // we need to maintain the order
+ }
+ current
+ }
+}
+
+object Sequencer {
+ def apply(): Sequencer = new Sequencer()
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala b/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala
new file mode 100644
index 00000000..002dc470
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala
@@ -0,0 +1,107 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/**
+ * Epoch time stamp.
+ */
+case class Timestamp(seconds: Long) extends AnyVal {
+ // Ordering is delegated to the underlying epoch-seconds value.
+ def <(that: Timestamp): Boolean = seconds < that.seconds
+ def >(that: Timestamp): Boolean = seconds > that.seconds
+ def ==(that: Timestamp): Boolean = seconds == that.seconds
+ def >=(that: Timestamp): Boolean = !(this < that)
+ def <=(that: Timestamp): Boolean = !(this > that)
+
+ override def toString: String = s"$seconds.seconds"
+}
+
+object Timestamp {
+ def now: Timestamp = new Timestamp(System.currentTimeMillis() / 1000)
+ /** The earlier (smaller) of the two timestamps; `l` wins ties. */
+ def earlier(l: Timestamp, r: Timestamp): Timestamp = if (l <= r) l else r
+ /** The later (larger) of the two timestamps; `l` wins ties. */
+ def later(l: Timestamp, r: Timestamp): Timestamp = if (l >= r) l else r
+}
+
+/**
+ * Epoch time stamp in milliseconds.
+ */
+case class MilliTimestamp(millis: Long) extends AnyVal {
+  override def toString: String = String.valueOf(millis) + ".millis"
+
+ // Truncates (does not round) to whole epoch seconds.
+ def toTimestamp: Timestamp = new Timestamp(millis / 1000)
+ // Maps this epoch-millis instant onto the System.nanoTime() time base by subtracting
+ // the age of this timestamp (in nanos) from the current nanoTime reading.
+ def toRelativeNanoTimestamp: RelativeNanoTimestamp = {
+ val diff = (System.currentTimeMillis() - millis) * 1000000
+ new RelativeNanoTimestamp(System.nanoTime() - diff)
+ }
+}
+
+object MilliTimestamp {
+ def now: MilliTimestamp = new MilliTimestamp(System.currentTimeMillis())
+}
+
+/**
+ * Epoch time stamp in nanoseconds.
+ *
+ * NOTE: This doesn't have any better precision than MilliTimestamp, it is just a convenient way to get an epoch
+ * timestamp in nanoseconds.
+ */
+case class NanoTimestamp(nanos: Long) extends AnyVal {
+ def -(that: NanoTimestamp) = new NanoTimestamp(nanos - that.nanos)
+ def +(that: NanoTimestamp) = new NanoTimestamp(nanos + that.nanos)
+ override def toString: String = String.valueOf(nanos) + ".nanos"
+}
+
+object NanoTimestamp {
+ // Millisecond precision scaled to nanoseconds, per the class-level note above.
+ def now: NanoTimestamp = new NanoTimestamp(System.currentTimeMillis() * 1000000)
+}
+
+/**
+ * Number of nanoseconds since an arbitrary origin timestamp provided by the JVM via System.nanoTime().
+ */
+case class RelativeNanoTimestamp(nanos: Long) extends AnyVal {
+ def -(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos - that.nanos)
+ def +(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos + that.nanos)
+ override def toString: String = String.valueOf(nanos) + ".nanos"
+
+ // Maps this nanoTime-based instant onto the epoch-millis clock by subtracting its age.
+ def toMilliTimestamp: MilliTimestamp =
+ new MilliTimestamp(System.currentTimeMillis - ((System.nanoTime - nanos) / 1000000))
+}
+
+object RelativeNanoTimestamp {
+ val zero = new RelativeNanoTimestamp(0L)
+
+ def now: RelativeNanoTimestamp = new RelativeNanoTimestamp(System.nanoTime())
+ // The nanoTime-based instant corresponding to the given epoch-millis timestamp.
+ def relativeTo(milliTimestamp: MilliTimestamp): RelativeNanoTimestamp =
+ new RelativeNanoTimestamp(now.nanos - (MilliTimestamp.now.millis - milliTimestamp.millis) * 1000000)
+}
+
+/**
+ * Number of nanoseconds that passed between two points in time.
+ */
+case class NanoInterval(nanos: Long) extends AnyVal {
+ // Ordering is delegated to the underlying nanosecond count.
+ def <(that: NanoInterval): Boolean = this.nanos < that.nanos
+ def >(that: NanoInterval): Boolean = this.nanos > that.nanos
+ def ==(that: NanoInterval): Boolean = this.nanos == that.nanos
+ def >=(that: NanoInterval): Boolean = this.nanos >= that.nanos
+ def <=(that: NanoInterval): Boolean = this.nanos <= that.nanos
+
+ override def toString: String = String.valueOf(nanos) + ".nanos"
+}
+
+object NanoInterval {
+ def default: NanoInterval = new NanoInterval(0L)
+ // Interval elapsed between `relative` and the current System.nanoTime() reading.
+ def since(relative: RelativeNanoTimestamp): NanoInterval = new NanoInterval(System.nanoTime() - relative.nanos)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala b/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala
new file mode 100644
index 00000000..d1197a5a
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala
@@ -0,0 +1,45 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon.util
+
+import scala.collection.concurrent.TrieMap
+
+object TriemapAtomicGetOrElseUpdate {
+
+ /**
+ * Workaround to the non thread-safe [[scala.collection.concurrent.TrieMap#getOrElseUpdate]] method. More details on
+ * why this is necessary can be found at [[https://issues.scala-lang.org/browse/SI-7943]].
+ */
+ implicit class Syntax[K, V](val trieMap: TrieMap[K, V]) extends AnyVal {
+
+ // Convenience overload with a no-op cleanup. The previous default `{ v: V ⇒ Unit }`
+ // returned the `Unit` companion object (a value-discard smell flagged by the compiler);
+ // `()` is the actual unit value.
+ def atomicGetOrElseUpdate(key: K, op: ⇒ V): V =
+ atomicGetOrElseUpdate(key, op, { _: V ⇒ () })
+
+ /**
+ * Atomically returns the value for `key`, evaluating `op` to create it when absent.
+ * If another thread won the insertion race, the locally created value is handed to
+ * `cleanup` (so any resources it holds can be released) and the winner's value wins.
+ */
+ def atomicGetOrElseUpdate(key: K, op: ⇒ V, cleanup: V ⇒ Unit): V =
+ trieMap.get(key) match {
+ case Some(v) ⇒ v
+ case None ⇒
+ val d = op
+ trieMap.putIfAbsent(key, d).map { oldValue ⇒
+ // If there was an old value then `d` was never added
+ // and thus needs to be cleaned up.
+ cleanup(d)
+ oldValue
+
+ } getOrElse (d)
+ }
+ }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala
new file mode 100644
index 00000000..a4bca570
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala
@@ -0,0 +1,99 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.executors
+
+import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder}
+import kamon.metric.instrument.{Gauge, MinMaxCounter, DifferentialValueCollector, InstrumentFactory}
+import java.util.concurrent.{ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor}
+import kamon.util.executors.ForkJoinPools.ForkJoinMetrics
+
+import scala.concurrent.forkjoin.ForkJoinPool
+
+object ForkJoinPools extends ForkJoinLowPriority {
+  /** Type class exposing the metrics shared by the Java and Scala fork-join pool implementations. */
+  trait ForkJoinMetrics[T] {
+    def getParallelism(fjp: T): Long
+    def getPoolSize(fjp: T): Long
+    def getActiveThreadCount(fjp: T): Long
+    def getRunningThreadCount(fjp: T): Long
+    def getQueuedTaskCount(fjp: T): Long
+    def getQueuedSubmissionCount(fjp: T): Long
+  }
+
+  /** [[ForkJoinMetrics]] instance for the JDK `java.util.concurrent.ForkJoinPool`. */
+  implicit object JavaForkJoin extends ForkJoinMetrics[JavaForkJoinPool] {
+    def getParallelism(fjp: JavaForkJoinPool) = fjp.getParallelism
+    def getPoolSize(fjp: JavaForkJoinPool) = fjp.getPoolSize.toLong
+    // Each metric must delegate to the same-named pool accessor; previously running/active
+    // were cross-wired, mislabeling both gauges.
+    def getRunningThreadCount(fjp: JavaForkJoinPool) = fjp.getRunningThreadCount.toLong
+    def getActiveThreadCount(fjp: JavaForkJoinPool) = fjp.getActiveThreadCount.toLong
+    def getQueuedTaskCount(fjp: JavaForkJoinPool) = fjp.getQueuedTaskCount
+    def getQueuedSubmissionCount(fjp: JavaForkJoinPool) = fjp.getQueuedSubmissionCount
+  }
+}
+
+trait ForkJoinLowPriority {
+  /** [[ForkJoinMetrics]] instance for `scala.concurrent.forkjoin.ForkJoinPool`. */
+  implicit object ScalaForkJoin extends ForkJoinMetrics[ForkJoinPool] {
+    def getParallelism(fjp: ForkJoinPool) = fjp.getParallelism
+    def getPoolSize(fjp: ForkJoinPool) = fjp.getPoolSize.toLong
+    // Each metric must delegate to the same-named pool accessor; previously running/active
+    // were cross-wired, mislabeling both gauges.
+    def getRunningThreadCount(fjp: ForkJoinPool) = fjp.getRunningThreadCount.toLong
+    def getActiveThreadCount(fjp: ForkJoinPool) = fjp.getActiveThreadCount.toLong
+    def getQueuedTaskCount(fjp: ForkJoinPool) = fjp.getQueuedTaskCount
+    def getQueuedSubmissionCount(fjp: ForkJoinPool) = fjp.getQueuedSubmissionCount
+  }
+}
+
+/**
+ * Entity recorder interface for fork-join pools. Concrete instances are created through
+ * [[ForkJoinPoolMetrics.factory]], which wires each instrument to an underlying pool.
+ */
+abstract class ForkJoinPoolMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+  // NOTE(review): "paralellism" is misspelled, but renaming it would break subclasses and
+  // external callers, so the name is kept as-is.
+  def paralellism: MinMaxCounter
+  def poolSize: Gauge
+  def activeThreads: Gauge
+  def runningThreads: Gauge
+  def queuedTaskCount: Gauge
+  def queuedSubmissionCount: Gauge
+}
+
+object ForkJoinPoolMetrics {
+  /** Builds an [[EntityRecorderFactory]] that records metrics from `fjp` under `categoryName`. */
+  def factory[T: ForkJoinMetrics](fjp: T, categoryName: String) = new EntityRecorderFactory[ForkJoinPoolMetrics] {
+    val forkJoinMetrics = implicitly[ForkJoinMetrics[T]]
+
+    def category: String = categoryName
+
+    def createRecorder(instrumentFactory: InstrumentFactory) = new ForkJoinPoolMetrics(instrumentFactory) {
+      // Parallelism is fixed at pool construction time, so it is recorded once up front.
+      val paralellism = minMaxCounter("parallelism")
+      paralellism.increment(forkJoinMetrics.getParallelism(fjp))
+
+      // The remaining instruments are gauges that sample the live pool on every collection tick.
+      val poolSize = gauge("pool-size", forkJoinMetrics.getPoolSize(fjp))
+      val activeThreads = gauge("active-threads", forkJoinMetrics.getActiveThreadCount(fjp))
+      val runningThreads = gauge("running-threads", forkJoinMetrics.getRunningThreadCount(fjp))
+      val queuedTaskCount = gauge("queued-task-count", forkJoinMetrics.getQueuedTaskCount(fjp))
+      val queuedSubmissionCount = gauge("queued-submission-count", forkJoinMetrics.getQueuedSubmissionCount(fjp))
+    }
+  }
+}
+
+/**
+ * Entity recorder sampling sizing and activity metrics from a
+ * [[java.util.concurrent.ThreadPoolExecutor]].
+ */
+class ThreadPoolExecutorMetrics(tpe: ThreadPoolExecutor, instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+  val corePoolSize = gauge("core-pool-size", tpe.getCorePoolSize.toLong)
+  val maxPoolSize = gauge("max-pool-size", tpe.getMaximumPoolSize.toLong)
+  val poolSize = gauge("pool-size", tpe.getPoolSize.toLong)
+  val activeThreads = gauge("active-threads", tpe.getActiveCount.toLong)
+  // getTaskCount grows monotonically, so only the delta between collections is recorded.
+  val processedTasks = gauge("processed-tasks", DifferentialValueCollector(() ⇒ {
+    tpe.getTaskCount
+  }))
+}
+
+object ThreadPoolExecutorMetrics {
+  /** Builds an [[EntityRecorderFactory]] producing [[ThreadPoolExecutorMetrics]] recorders for `tpe`. */
+  def factory(tpe: ThreadPoolExecutor, cat: String) = new EntityRecorderFactory[ThreadPoolExecutorMetrics] {
+    override def category: String = cat
+    override def createRecorder(instrumentFactory: InstrumentFactory) = new ThreadPoolExecutorMetrics(tpe, instrumentFactory)
+  }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala
new file mode 100644
index 00000000..60612beb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala
@@ -0,0 +1,134 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.executors
+
+import java.util.concurrent.{ExecutorService, ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor}
+
+import kamon.Kamon
+import kamon.metric.Entity
+
+import scala.concurrent.forkjoin.ForkJoinPool
+import scala.util.control.NoStackTrace
+
+object ExecutorServiceMetrics {
+  val Category = "executor-service"
+
+  // Executors.newFixedThreadPool and friends wrap pools in these package-private delegate
+  // classes, so they can only be identified reflectively by name.
+  private val DelegatedExecutor = Class.forName("java.util.concurrent.Executors$DelegatedExecutorService")
+  private val FinalizableDelegated = Class.forName("java.util.concurrent.Executors$FinalizableDelegatedExecutorService")
+  private val DelegateScheduled = Class.forName("java.util.concurrent.Executors$DelegatedScheduledExecutorService")
+
+  private val executorField = {
+    // The delegate classes keep the wrapped pool in the private field "e"; open it up once.
+    val field = DelegatedExecutor.getDeclaredField("e")
+    field.setAccessible(true)
+    field
+  }
+
+  /**
+   *
+   * Register the [[http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html ThreadPoolExecutor]] to Monitor.
+   *
+   * @param name The name of the [[ThreadPoolExecutor]]
+   * @param threadPool The instance of the [[ThreadPoolExecutor]]
+   * @param tags The tags associated to the [[ThreadPoolExecutor]]
+   */
+  @inline private def registerThreadPool(name: String, threadPool: ThreadPoolExecutor, tags: Map[String, String]): Entity = {
+    val threadPoolEntity = Entity(name, Category, tags + ("executor-type" → "thread-pool-executor"))
+    Kamon.metrics.entity(ThreadPoolExecutorMetrics.factory(threadPool, Category), threadPoolEntity)
+    threadPoolEntity
+  }
+
+  /**
+   *
+   * Register the [[http://www.scala-lang.org/api/current/index.html#scala.collection.parallel.TaskSupport ForkJoinPool]] to Monitor.
+   *
+   * @param name The name of the [[ForkJoinPool]]
+   * @param forkJoinPool The instance of the [[ForkJoinPool]]
+   * @param tags The tags associated to the [[ForkJoinPool]]
+   */
+  @inline private def registerScalaForkJoin(name: String, forkJoinPool: ForkJoinPool, tags: Map[String, String]): Entity = {
+    val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool"))
+    Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity)
+    forkJoinEntity
+  }
+
+  /**
+   *
+   * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ForkJoinPool.html JavaForkJoinPool]] to Monitor.
+   *
+   * @param name The name of the [[JavaForkJoinPool]]
+   * @param forkJoinPool The instance of the [[JavaForkJoinPool]]
+   * @param tags The tags associated to the [[JavaForkJoinPool]]
+   */
+  @inline private def registerJavaForkJoin(name: String, forkJoinPool: JavaForkJoinPool, tags: Map[String, String]): Entity = {
+    val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool"))
+    Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity)
+    forkJoinEntity
+  }
+
+  /**
+   *
+   * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] to Monitor.
+   *
+   * @param name The name of the [[ExecutorService]]
+   * @param executorService The instance of the [[ExecutorService]]
+   * @param tags The tags associated to the [[ExecutorService]]
+   */
+  def register(name: String, executorService: ExecutorService, tags: Map[String, String]): Entity = executorService match {
+    case threadPoolExecutor: ThreadPoolExecutor ⇒ registerThreadPool(name, threadPoolExecutor, tags)
+    // The typed patterns below already match subclasses. The original extra
+    // `x.getClass.isAssignableFrom(Klass)` guards had their arguments reversed, so only exact
+    // runtime classes matched and subclasses were rejected with NotSupportedException.
+    case scalaForkJoinPool: ForkJoinPool ⇒ registerScalaForkJoin(name, scalaForkJoinPool, tags)
+    case javaForkJoinPool: JavaForkJoinPool ⇒ registerJavaForkJoin(name, javaForkJoinPool, tags)
+    // All three delegate wrappers are unwrapped and the underlying pool registered instead.
+    case delegated: ExecutorService if DelegatedExecutor.isInstance(delegated) || DelegateScheduled.isInstance(delegated) || FinalizableDelegated.isInstance(delegated) ⇒ registerDelegatedExecutor(name, delegated, tags)
+    case other ⇒ throw NotSupportedException(s"The ExecutorService $name is not supported.")
+  }
+
+  // Java variant accepting a java.util.Map of tags.
+  def register(name: String, executorService: ExecutorService, tags: java.util.Map[String, String]): Entity = {
+    import scala.collection.JavaConverters._
+    register(name, executorService, tags.asScala.toMap)
+  }
+
+  /**
+   *
+   * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] to Monitor.
+   *
+   * @param name The name of the [[ExecutorService]]
+   * @param executorService The instance of the [[ExecutorService]]
+   */
+  def register(name: String, executorService: ExecutorService): Entity = {
+    register(name, executorService, Map.empty[String, String])
+  }
+
+  /** Stops recording metrics for the given registered entity. */
+  def remove(entity: Entity): Unit = Kamon.metrics.removeEntity(entity)
+
+  /**
+   * INTERNAL USAGE ONLY
+   *
+   * Unwraps a delegated executor (as produced by the `Executors` factory methods) and registers
+   * the underlying pool instead.
+   */
+  private def registerDelegatedExecutor(name: String, executor: ExecutorService, tags: Map[String, String]) = {
+    val underlyingExecutor = executorField.get(executor).asInstanceOf[ExecutorService]
+    register(name, underlyingExecutor, tags)
+  }
+
+  case class NotSupportedException(message: String) extends RuntimeException with NoStackTrace
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala
new file mode 100644
index 00000000..929fef4f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala
@@ -0,0 +1,41 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.http
+
+import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder}
+import kamon.metric.instrument.InstrumentFactory
+
+/**
+ * Counts HTTP response status codes into per status code and per trace name + status counters. If recording a HTTP
+ * response with status 500 for the trace "GetUser", the counter with name "500" as well as the counter with name
+ * "GetUser_500" will be incremented.
+ */
+class HttpServerMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+  /** Increments the counter tracking how many responses were sent with `statusCode`. */
+  def recordResponse(statusCode: String): Unit =
+    counter(statusCode).increment()
+
+  /** Increments both the global status-code counter and the per-trace "traceName_statusCode" counter. */
+  def recordResponse(traceName: String, statusCode: String): Unit = {
+    recordResponse(statusCode)
+    counter(s"${traceName}_$statusCode").increment()
+  }
+}
+
+/** Factory registering [[HttpServerMetrics]] recorders under the "http-server" category. */
+object HttpServerMetrics extends EntityRecorderFactory[HttpServerMetrics] {
+  def category: String = "http-server"
+  def createRecorder(instrumentFactory: InstrumentFactory): HttpServerMetrics = new HttpServerMetrics(instrumentFactory)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala b/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala
new file mode 100644
index 00000000..11be7bbe
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala
@@ -0,0 +1,49 @@
+/* =========================================================================================
+ * Copyright © 2013-2015 the kamon project <http://kamon.io/>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.logger
+
+import org.slf4j.{Logger ⇒ SLF4JLogger}
+
+/**
+ * Thin wrapper around an SLF4J [[org.slf4j.Logger]] that takes its message and throwable
+ * arguments by-name, so they are only evaluated when the corresponding level is enabled.
+ */
+class LazyLogger(val logger: SLF4JLogger) {
+
+  @inline final def isTraceEnabled = logger.isTraceEnabled
+  // `msg` is already a String; the original `msg.toString` was a redundant no-op.
+  @inline final def trace(msg: ⇒ String): Unit = if (isTraceEnabled) logger.trace(msg)
+  @inline final def trace(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isTraceEnabled) logger.trace(msg, t)
+
+  @inline final def isDebugEnabled = logger.isDebugEnabled
+  @inline final def debug(msg: ⇒ String): Unit = if (isDebugEnabled) logger.debug(msg)
+  @inline final def debug(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isDebugEnabled) logger.debug(msg, t)
+
+  @inline final def isErrorEnabled = logger.isErrorEnabled
+  @inline final def error(msg: ⇒ String): Unit = if (isErrorEnabled) logger.error(msg)
+  @inline final def error(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isErrorEnabled) logger.error(msg, t)
+
+  @inline final def isInfoEnabled = logger.isInfoEnabled
+  @inline final def info(msg: ⇒ String): Unit = if (isInfoEnabled) logger.info(msg)
+  @inline final def info(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isInfoEnabled) logger.info(msg, t)
+
+  @inline final def isWarnEnabled = logger.isWarnEnabled
+  @inline final def warn(msg: ⇒ String): Unit = if (isWarnEnabled) logger.warn(msg)
+  @inline final def warn(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isWarnEnabled) logger.warn(msg, t)
+}
+
+object LazyLogger {
+  import scala.reflect.{classTag, ClassTag}
+
+  /** Creates a [[LazyLogger]] backed by the SLF4J logger registered under `name`. */
+  def apply(name: String): LazyLogger = new LazyLogger(org.slf4j.LoggerFactory.getLogger(name))
+  /** Creates a [[LazyLogger]] named after the given class. */
+  def apply(cls: Class[_]): LazyLogger = apply(cls.getName)
+  /** Creates a [[LazyLogger]] named after the runtime class of the type parameter `C`. */
+  def apply[C: ClassTag](): LazyLogger = apply(classTag[C].runtimeClass.getName)
+} \ No newline at end of file