From 4d828e1a3195e55365c865aa3a78af9668742643 Mon Sep 17 00:00:00 2001
From: Ivan Topolnjak
Date: Mon, 24 Apr 2017 13:54:40 +0200
Subject: Prepare for the major cleanup

Moved all the original files from src/main to src/legacy-main, and did the
same with the test files. Also removed the autoweave module, examples and
bench, as I'm planning to have them in separate repositories.
---
 .../legacy-main/java/kamon/jsr166/LongAdder.java   | 224 ++++++
 .../java/kamon/jsr166/LongMaxUpdater.java          | 191 ++++++
 .../legacy-main/java/kamon/jsr166/Striped64.java   | 371 +++++++
 .../java/kamon/util/GlobPathFilter.java            | 110 ++++++
 .../src/legacy-main/resources/META-INF/aop.xml     |  15 +
 .../src/legacy-main/resources/reference.conf       | 184 ++++++
 .../scala-2.10/kamon/ActorSystemTools.scala        |  25 ++
 .../scala-2.11/kamon/ActorSystemTools.scala        |  25 ++
 .../scala-2.12/kamon/ActorSystemTools.scala        |  25 ++
 kamon-core/src/legacy-main/scala/kamon/Kamon.scala | 115 ++++++
 .../src/legacy-main/scala/kamon/ModuleLoader.scala | 128 +++++++
 .../legacy-main/scala/kamon/metric/Entity.scala    |  37 ++
 .../scala/kamon/metric/EntityRecorder.scala        | 235 ++++++++
 .../scala/kamon/metric/EntitySnapshot.scala        |  63 ++++
 .../legacy-main/scala/kamon/metric/MetricKey.scala |  47 +++
 .../scala/kamon/metric/MetricScaleDecorator.scala  |  57 +++
 .../scala/kamon/metric/MetricsModule.scala         | 394 +++++++++
 .../scala/kamon/metric/MetricsSettings.scala       | 123 +++++++
 .../kamon/metric/SubscriptionsDispatcher.scala     | 116 ++++++
 .../kamon/metric/TickMetricSnapshotBuffer.scala    |  65 ++++
 .../scala/kamon/metric/TraceMetrics.scala          |  53 +++
 .../scala/kamon/metric/instrument/Counter.scala    |  65 ++++
 .../scala/kamon/metric/instrument/Gauge.scala      | 120 +++++++
 .../scala/kamon/metric/instrument/Histogram.scala  | 331 +++++++++
 .../scala/kamon/metric/instrument/Instrument.scala |  51 +++
 .../metric/instrument/InstrumentFactory.scala      |  51 +++
 .../metric/instrument/InstrumentSettings.scala     |  73 ++++
 .../kamon/metric/instrument/MinMaxCounter.scala    | 105 ++++++
 .../instrument/ModifiedAtomicHistogram.scala       |  31 ++
 .../kamon/metric/instrument/RefreshScheduler.scala | 115 ++++++
 .../metric/instrument/UnitOfMeasurement.scala      | 109 ++++++
 .../legacy-main/scala/kamon/trace/Incubator.scala  |  94 +++++
 .../scala/kamon/trace/MetricsOnlyContext.scala     | 186 ++++++++
 .../legacy-main/scala/kamon/trace/Sampler.scala    | 101 ++++++
 .../scala/kamon/trace/TraceContext.scala           | 202 +++++++++
 .../legacy-main/scala/kamon/trace/TraceLocal.scala |  64 ++++
 .../scala/kamon/trace/TraceSettings.scala          |  51 +++
 .../scala/kamon/trace/TraceSubscriptions.scala     |  45 +++
 .../scala/kamon/trace/TracerModule.scala           | 197 +++++++++
 .../scala/kamon/trace/TracingContext.scala         | 113 ++++++
 .../trace/logging/LogbackTraceTokenConverter.scala |  26 ++
 .../scala/kamon/trace/logging/MdcKeysSupport.scala |  54 +++
 .../legacy-main/scala/kamon/util/ConfigTools.scala |  48 +++
 .../scala/kamon/util/FastDispatch.scala            |  38 ++
 .../scala/kamon/util/FunctionalInterfaces.scala    |  25 ++
 .../legacy-main/scala/kamon/util/JavaTags.scala    |  14 +
 .../src/legacy-main/scala/kamon/util/Latency.scala |  29 ++
 .../scala/kamon/util/LazyActorRef.scala            |  69 ++++
 .../legacy-main/scala/kamon/util/MapMerge.scala    |  43 +++
 .../legacy-main/scala/kamon/util/NeedToScale.scala |  37 ++
 .../scala/kamon/util/PaddedAtomicLong.scala        |  25 ++
 .../legacy-main/scala/kamon/util/PathFilter.scala  |   5 +
 .../scala/kamon/util/RegexPathFilter.scala         |  27 ++
 .../kamon/util/SameThreadExecutionContext.scala    |  30 ++
.../legacy-main/scala/kamon/util/Sequencer.scala | 40 +++ .../legacy-main/scala/kamon/util/Timestamp.scala | 107 ++++++ .../kamon/util/TriemapAtomicGetOrElseUpdate.scala | 45 +++ .../util/executors/ExecutorMetricRecorder.scala | 99 ++++++ .../util/executors/ExecutorServiceMetrics.scala | 134 +++++++ .../scala/kamon/util/http/HttpServerMetrics.scala | 41 +++ .../scala/kamon/util/logger/LazyLogger.scala | 49 +++ .../src/legacy-test/resources/application.conf | 26 ++ kamon-core/src/legacy-test/resources/logback.xml | 12 + .../scala/kamon/KamonLifecycleSpec.scala | 93 +++++ .../kamon/metric/MetricScaleDecoratorSpec.scala | 102 ++++++ .../scala/kamon/metric/SimpleMetricsSpec.scala | 106 ++++++ .../kamon/metric/SubscriptionsProtocolSpec.scala | 150 ++++++++ .../metric/TickMetricSnapshotBufferSpec.scala | 97 +++++ .../scala/kamon/metric/TraceMetricsSpec.scala | 121 +++++++ .../kamon/metric/instrument/CounterSpec.scala | 79 +++++ .../scala/kamon/metric/instrument/GaugeSpec.scala | 79 +++++ .../kamon/metric/instrument/HistogramSpec.scala | 157 ++++++++ .../metric/instrument/MinMaxCounterSpec.scala | 140 ++++++++ .../metric/instrument/UnitOfMeasurementSpec.scala | 98 +++++ .../scala/kamon/testkit/BaseKamonSpec.scala | 70 ++++ .../scala/kamon/trace/SamplerSpec.scala | 76 ++++ .../scala/kamon/trace/SimpleTraceSpec.scala | 167 +++++++++ .../kamon/trace/TraceContextManipulationSpec.scala | 113 ++++++ .../scala/kamon/trace/TraceLocalSpec.scala | 108 ++++++ .../kamon/trace/logging/MdcKeysSupportSpec.scala | 44 +++ .../scala/kamon/util/GlobPathFilterSpec.scala | 73 ++++ .../scala/kamon/util/NeedToScaleSpec.scala | 67 ++++ .../scala/kamon/util/RegexPathFilterSpec.scala | 59 +++ .../executors/ExecutorServiceMetricsSpec.scala | 75 ++++ .../src/main/java/kamon/jsr166/LongAdder.java | 224 ------------ .../src/main/java/kamon/jsr166/LongMaxUpdater.java | 191 ---------- .../src/main/java/kamon/jsr166/Striped64.java | 371 ------------------- .../src/main/java/kamon/util/GlobPathFilter.java | 110 ------ kamon-core/src/main/resources/META-INF/aop.xml | 15 - kamon-core/src/main/resources/reference.conf | 184 ---------- .../main/scala-2.10/kamon/ActorSystemTools.scala | 25 -- .../main/scala-2.11/kamon/ActorSystemTools.scala | 25 -- .../main/scala-2.12/kamon/ActorSystemTools.scala | 25 -- kamon-core/src/main/scala/kamon/Kamon.scala | 115 ------ kamon-core/src/main/scala/kamon/ModuleLoader.scala | 128 ------- .../src/main/scala/kamon/metric/Entity.scala | 37 -- .../main/scala/kamon/metric/EntityRecorder.scala | 235 ------------ .../main/scala/kamon/metric/EntitySnapshot.scala | 63 ---- .../src/main/scala/kamon/metric/MetricKey.scala | 47 --- .../scala/kamon/metric/MetricScaleDecorator.scala | 57 --- .../main/scala/kamon/metric/MetricsModule.scala | 394 --------------------- .../main/scala/kamon/metric/MetricsSettings.scala | 123 ------- .../kamon/metric/SubscriptionsDispatcher.scala | 116 ------ .../kamon/metric/TickMetricSnapshotBuffer.scala | 65 ---- .../src/main/scala/kamon/metric/TraceMetrics.scala | 53 --- .../scala/kamon/metric/instrument/Counter.scala | 65 ---- .../main/scala/kamon/metric/instrument/Gauge.scala | 120 ------- .../scala/kamon/metric/instrument/Histogram.scala | 331 ----------------- .../scala/kamon/metric/instrument/Instrument.scala | 51 --- .../metric/instrument/InstrumentFactory.scala | 51 --- .../metric/instrument/InstrumentSettings.scala | 73 ---- .../kamon/metric/instrument/MinMaxCounter.scala | 105 ------ .../instrument/ModifiedAtomicHistogram.scala | 31 -- 
.../kamon/metric/instrument/RefreshScheduler.scala | 115 ------ .../metric/instrument/UnitOfMeasurement.scala | 109 ------ .../src/main/scala/kamon/trace/Incubator.scala | 94 ----- .../scala/kamon/trace/MetricsOnlyContext.scala | 186 ---------- .../src/main/scala/kamon/trace/Sampler.scala | 101 ------ .../src/main/scala/kamon/trace/TraceContext.scala | 202 ----------- .../src/main/scala/kamon/trace/TraceLocal.scala | 64 ---- .../src/main/scala/kamon/trace/TraceSettings.scala | 51 --- .../scala/kamon/trace/TraceSubscriptions.scala | 45 --- .../src/main/scala/kamon/trace/TracerModule.scala | 197 ----------- .../main/scala/kamon/trace/TracingContext.scala | 113 ------ .../trace/logging/LogbackTraceTokenConverter.scala | 26 -- .../scala/kamon/trace/logging/MdcKeysSupport.scala | 54 --- .../src/main/scala/kamon/util/ConfigTools.scala | 48 --- .../src/main/scala/kamon/util/FastDispatch.scala | 38 -- .../scala/kamon/util/FunctionalInterfaces.scala | 25 -- .../src/main/scala/kamon/util/JavaTags.scala | 14 - kamon-core/src/main/scala/kamon/util/Latency.scala | 29 -- .../src/main/scala/kamon/util/LazyActorRef.scala | 69 ---- .../src/main/scala/kamon/util/MapMerge.scala | 43 --- .../src/main/scala/kamon/util/NeedToScale.scala | 37 -- .../main/scala/kamon/util/PaddedAtomicLong.scala | 25 -- .../src/main/scala/kamon/util/PathFilter.scala | 5 - .../main/scala/kamon/util/RegexPathFilter.scala | 27 -- .../kamon/util/SameThreadExecutionContext.scala | 30 -- .../src/main/scala/kamon/util/Sequencer.scala | 40 --- .../src/main/scala/kamon/util/Timestamp.scala | 107 ------ .../kamon/util/TriemapAtomicGetOrElseUpdate.scala | 45 --- .../util/executors/ExecutorMetricRecorder.scala | 99 ------ .../util/executors/ExecutorServiceMetrics.scala | 134 ------- .../scala/kamon/util/http/HttpServerMetrics.scala | 41 --- .../main/scala/kamon/util/logger/LazyLogger.scala | 49 --- kamon-core/src/test/resources/application.conf | 26 -- kamon-core/src/test/resources/logback.xml | 12 - .../src/test/scala/kamon/KamonLifecycleSpec.scala | 93 ----- .../kamon/metric/MetricScaleDecoratorSpec.scala | 102 ------ .../scala/kamon/metric/SimpleMetricsSpec.scala | 106 ------ .../kamon/metric/SubscriptionsProtocolSpec.scala | 150 -------- .../metric/TickMetricSnapshotBufferSpec.scala | 97 ----- .../test/scala/kamon/metric/TraceMetricsSpec.scala | 121 ------- .../kamon/metric/instrument/CounterSpec.scala | 79 ----- .../scala/kamon/metric/instrument/GaugeSpec.scala | 79 ----- .../kamon/metric/instrument/HistogramSpec.scala | 157 -------- .../metric/instrument/MinMaxCounterSpec.scala | 140 -------- .../metric/instrument/UnitOfMeasurementSpec.scala | 98 ----- .../test/scala/kamon/testkit/BaseKamonSpec.scala | 70 ---- .../src/test/scala/kamon/trace/SamplerSpec.scala | 76 ---- .../test/scala/kamon/trace/SimpleTraceSpec.scala | 167 --------- .../kamon/trace/TraceContextManipulationSpec.scala | 113 ------ .../test/scala/kamon/trace/TraceLocalSpec.scala | 108 ------ .../kamon/trace/logging/MdcKeysSupportSpec.scala | 44 --- .../test/scala/kamon/util/GlobPathFilterSpec.scala | 73 ---- .../test/scala/kamon/util/NeedToScaleSpec.scala | 67 ---- .../scala/kamon/util/RegexPathFilterSpec.scala | 59 --- .../executors/ExecutorServiceMetricsSpec.scala | 75 ---- 168 files changed, 7804 insertions(+), 7804 deletions(-) create mode 100644 kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java create mode 100644 kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java create mode 100644 kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java 
create mode 100644 kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java create mode 100644 kamon-core/src/legacy-main/resources/META-INF/aop.xml create mode 100644 kamon-core/src/legacy-main/resources/reference.conf create mode 100644 kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala create mode 100644 kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala create mode 100644 kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/Kamon.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala create mode 100755 kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala create mode 100644 
kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/Latency.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala create mode 100644 kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala create mode 100644 kamon-core/src/legacy-test/resources/application.conf create mode 100644 kamon-core/src/legacy-test/resources/logback.xml create mode 100644 kamon-core/src/legacy-test/scala/kamon/KamonLifecycleSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/MetricScaleDecoratorSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/SimpleMetricsSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/SubscriptionsProtocolSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/TraceMetricsSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/instrument/CounterSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/instrument/GaugeSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/instrument/HistogramSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/testkit/BaseKamonSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/trace/SamplerSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/trace/SimpleTraceSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/trace/TraceContextManipulationSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/trace/TraceLocalSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/util/GlobPathFilterSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/util/NeedToScaleSpec.scala create mode 100644 kamon-core/src/legacy-test/scala/kamon/util/RegexPathFilterSpec.scala create mode 100644 
kamon-core/src/legacy-test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala delete mode 100644 kamon-core/src/main/java/kamon/jsr166/LongAdder.java delete mode 100644 kamon-core/src/main/java/kamon/jsr166/LongMaxUpdater.java delete mode 100644 kamon-core/src/main/java/kamon/jsr166/Striped64.java delete mode 100644 kamon-core/src/main/java/kamon/util/GlobPathFilter.java delete mode 100644 kamon-core/src/main/resources/META-INF/aop.xml delete mode 100644 kamon-core/src/main/resources/reference.conf delete mode 100644 kamon-core/src/main/scala-2.10/kamon/ActorSystemTools.scala delete mode 100644 kamon-core/src/main/scala-2.11/kamon/ActorSystemTools.scala delete mode 100644 kamon-core/src/main/scala-2.12/kamon/ActorSystemTools.scala delete mode 100644 kamon-core/src/main/scala/kamon/Kamon.scala delete mode 100644 kamon-core/src/main/scala/kamon/ModuleLoader.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/Entity.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/EntityRecorder.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/EntitySnapshot.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/MetricKey.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/MetricScaleDecorator.scala delete mode 100755 kamon-core/src/main/scala/kamon/metric/MetricsModule.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/MetricsSettings.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/SubscriptionsDispatcher.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/TickMetricSnapshotBuffer.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/TraceMetrics.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/Counter.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/Gauge.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/Histogram.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/Instrument.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/InstrumentFactory.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/InstrumentSettings.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/MinMaxCounter.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/RefreshScheduler.scala delete mode 100644 kamon-core/src/main/scala/kamon/metric/instrument/UnitOfMeasurement.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/Incubator.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/MetricsOnlyContext.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/Sampler.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TraceContext.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TraceLocal.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TraceSettings.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TraceSubscriptions.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TracerModule.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/TracingContext.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala delete mode 100644 kamon-core/src/main/scala/kamon/trace/logging/MdcKeysSupport.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/ConfigTools.scala delete mode 100644 
kamon-core/src/main/scala/kamon/util/FastDispatch.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/FunctionalInterfaces.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/JavaTags.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/Latency.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/LazyActorRef.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/MapMerge.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/NeedToScale.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/PaddedAtomicLong.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/PathFilter.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/RegexPathFilter.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/SameThreadExecutionContext.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/Sequencer.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/Timestamp.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/executors/ExecutorMetricRecorder.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/executors/ExecutorServiceMetrics.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/http/HttpServerMetrics.scala delete mode 100644 kamon-core/src/main/scala/kamon/util/logger/LazyLogger.scala delete mode 100644 kamon-core/src/test/resources/application.conf delete mode 100644 kamon-core/src/test/resources/logback.xml delete mode 100644 kamon-core/src/test/scala/kamon/KamonLifecycleSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/MetricScaleDecoratorSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/SimpleMetricsSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/SubscriptionsProtocolSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/TraceMetricsSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/instrument/CounterSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/instrument/GaugeSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/instrument/HistogramSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/testkit/BaseKamonSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/trace/SamplerSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/trace/SimpleTraceSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/trace/TraceContextManipulationSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/trace/TraceLocalSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/util/GlobPathFilterSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/util/NeedToScaleSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/util/RegexPathFilterSpec.scala delete mode 100644 kamon-core/src/test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala (limited to 'kamon-core/src') diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java b/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java new file mode 100644 index 00000000..7e47ae63 --- 
/dev/null +++ b/kamon-core/src/legacy-main/java/kamon/jsr166/LongAdder.java @@ -0,0 +1,224 @@ +/* + +Note: this was copied from Doug Lea's CVS repository + http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ + +LongAdder.java version 1.14 + +*/ + + +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package kamon.jsr166; + +import java.io.Serializable; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
<p>
This class is usually preferable to {@link java.util.concurrent.atomic.AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
<p>
This class extends {@link Number}, but does not define + * methods such as {@code equals}, {@code hashCode} and {@code + * compareTo} because instances are expected to be mutated, and so are + * not useful as collection keys. + * + *
<p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot; invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + public long sumAndReset() { + long sum = getAndSetBase(0L); + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.getAndSet(0L); + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. 
+     */
+    public float floatValue() {
+        return (float)sum();
+    }
+
+    /**
+     * Returns the {@link #sum} as a {@code double} after a widening
+     * primitive conversion.
+     */
+    public double doubleValue() {
+        return (double)sum();
+    }
+
+    private void writeObject(java.io.ObjectOutputStream s)
+        throws java.io.IOException {
+        s.defaultWriteObject();
+        s.writeLong(sum());
+    }
+
+    private void readObject(java.io.ObjectInputStream s)
+        throws java.io.IOException, ClassNotFoundException {
+        s.defaultReadObject();
+        busy = 0;
+        cells = null;
+        base = s.readLong();
+    }
+
+}
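As a quick illustration of the javadoc above, here is a minimal, hypothetical Scala sketch of using this LongAdder as a low-contention shared counter; the thread and iteration counts are arbitrary, and it only assumes kamon-core on the classpath:

import java.util.concurrent.{Executors, TimeUnit}
import kamon.jsr166.LongAdder

object LongAdderDemo extends App {
  val adder = new LongAdder()
  val pool  = Executors.newFixedThreadPool(4)

  // Contended increments: once a CAS on `base` fails, the adder starts
  // striping updates over Cells, which is where its throughput advantage
  // over a single AtomicLong comes from.
  for (_ <- 1 to 4) pool.execute(new Runnable {
    def run(): Unit = for (_ <- 1 to 1000000) adder.increment()
  })

  pool.shutdown()
  pool.awaitTermination(10, TimeUnit.SECONDS)

  // sum() folds base plus every cell; here it should print 4000000.
  println(adder.sum())
}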
diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java b/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java
new file mode 100644
index 00000000..fc9ea4e5
--- /dev/null
+++ b/kamon-core/src/legacy-main/java/kamon/jsr166/LongMaxUpdater.java
@@ -0,0 +1,191 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package kamon.jsr166;
+import java.io.Serializable;
+
+/**
+ * One or more variables that together maintain a running {@code long}
+ * maximum with initial value {@code Long.MIN_VALUE}. When updates
+ * (method {@link #update}) are contended across threads, the set of
+ * variables may grow dynamically to reduce contention. Method {@link
+ * #max} (or, equivalently, {@link #longValue}) returns the current
+ * maximum across the variables maintaining updates.
+ *
+ * <p>
This class extends {@link Number}, but does not define + * methods such as {@code equals}, {@code hashCode} and {@code + * compareTo} because instances are expected to be mutated, and so are + * not useful as collection keys. + * + *
<p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + * @author Doug Lea + */ +public class LongMaxUpdater extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of max for use in retryUpdate + */ + final long fn(long v, long x) { return v > x ? v : x; } + + /** + * Creates a new instance with initial maximum of {@code + * Long.MIN_VALUE}. + */ + public LongMaxUpdater() { + base = Long.MIN_VALUE; + } + + /** + * Creates a new instance with the given initialValue + */ + public LongMaxUpdater(long initialValue) { + base = initialValue; + } + + + /** + * Updates the maximum to be at least the given value. + * + * @param x the value to update + */ + public void update(long x) { + Cell[] as; long b, v; HashCode hc; Cell a; int n; + if ((as = cells) != null || + (b = base) < x && !casBase(b, x)) { + boolean uncontended = true; + int h = (hc = threadHashCode.get()).code; + if (as == null || (n = as.length) < 1 || + (a = as[(n - 1) & h]) == null || + ((v = a.value) < x && !(uncontended = a.cas(v, x)))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Returns the current maximum. The returned value is + * NOT an atomic snapshot; invocation in the absence of + * concurrent updates returns an accurate result, but concurrent + * updates that occur while the value is being calculated might + * not be incorporated. + * + * @return the maximum + */ + public long max() { + Cell[] as = cells; + long max = base; + if (as != null) { + int n = as.length; + long v; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null && (v = a.value) > max) + max = v; + } + } + return max; + } + + /** + * Resets variables maintaining updates to {@code Long.MIN_VALUE}. + * This method may be a useful alternative to creating a new + * updater, but is only effective if there are no concurrent + * updates. Because this method is intrinsically racy, it should + * only be used when it is known that no threads are concurrently + * updating. + */ + public void reset() { + internalReset(Long.MIN_VALUE); + } + + /** + * Equivalent in effect to {@link #max} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the maximum + */ + public long maxThenReset(long newValue) { + Cell[] as = cells; + long max = base; + base = newValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + long v = a.value; + a.value = newValue; + if (v > max) + max = v; + } + } + } + return max; + } + + /** + * Returns the String representation of the {@link #max}. + * @return the String representation of the {@link #max} + */ + public String toString() { + return Long.toString(max()); + } + + /** + * Equivalent to {@link #max}. + * + * @return the maximum + */ + public long longValue() { + return max(); + } + + /** + * Returns the {@link #max} as an {@code int} after a narrowing + * primitive conversion. + */ + public int intValue() { + return (int)max(); + } + + /** + * Returns the {@link #max} as a {@code float} + * after a widening primitive conversion. + */ + public float floatValue() { + return (float)max(); + } + + /** + * Returns the {@link #max} as a {@code double} after a widening + * primitive conversion. 
+ */ + public double doubleValue() { + return (double)max(); + } + + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeLong(max()); + } + + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java b/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java new file mode 100644 index 00000000..8fbfa4ba --- /dev/null +++ b/kamon-core/src/legacy-main/java/kamon/jsr166/Striped64.java @@ -0,0 +1,371 @@ +/* + +Note: this was copied from Doug Lea's CVS repository + http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ + +Striped64.java version 1.8 + +*/ + + +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package kamon.jsr166; + +import java.util.Random; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock; when the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. 
Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + final long getAndSet(long val) { + long v; + do { + v = UNSAFE.getLongVolatile(this, valueOffset); + } while (!UNSAFE.compareAndSwapLong(this, valueOffset, v, val)); + return v; + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * Holder for the thread-local hash code. The code is initially + * random, but may be set to a different value upon collisions. + */ + static final class HashCode { + static final Random rng = new Random(); + int code; + HashCode() { + int h = rng.nextInt(); // Avoid zero to allow xorShift rehash + code = (h == 0) ? 1 : h; + } + } + + /** + * The corresponding ThreadLocal class + */ + static final class ThreadHashCode extends ThreadLocal { + public HashCode initialValue() { return new HashCode(); } + } + + /** + * Static per-thread hash codes. Shared across all instances to + * reduce ThreadLocal pollution and because adjustments due to + * collisions in one table are likely to be appropriate for + * others. + */ + static final ThreadHashCode threadHashCode = new ThreadHashCode(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. + */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. 
+ */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the base field. + */ + final long getAndSetBase(long val) { + long v; + do { + v = UNSAFE.getLongVolatile(this, baseOffset); + } while (!UNSAFE.compareAndSwapLong(this, baseOffset, v, val)); + return v; + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { + int h = hc.code; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + hc.code = h; // Record index for next time + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException tryReflectionInstead) {} + try { + return java.security.AccessController.doPrivileged + (new java.security.PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + Class k = sun.misc.Unsafe.class; + for (java.lang.reflect.Field f : k.getDeclaredFields()) { + f.setAccessible(true); + Object x = f.get(null); + if (k.isInstance(x)) + return k.cast(x); + } + throw new NoSuchFieldError("the Unsafe"); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } +} diff --git a/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java b/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java new file mode 100644 index 00000000..ac5d08a6 --- /dev/null +++ b/kamon-core/src/legacy-main/java/kamon/util/GlobPathFilter.java @@ -0,0 +1,110 @@ +/* + * ========================================================================================= + * Copyright 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.util; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Default implementation of PathFilter. Uses glob based includes and excludes to determine whether to export. + * + * @author John E. Bailey + * @author David M. Lloyd + */ +public final class GlobPathFilter implements PathFilter { + private static final Pattern GLOB_PATTERN = Pattern.compile("(\\*\\*?)|(\\?)|(\\\\.)|(/+)|([^*?]+)"); + + private final String glob; + private final Pattern pattern; + + /** + * Construct a new instance. + * + * @param glob the path glob to match + */ + public GlobPathFilter(final String glob) { + pattern = getGlobPattern(glob); + this.glob = glob; + } + + /** + * Determine whether a path should be accepted. 
+     *
+     * @param path the path to check
+     * @return true if the path should be accepted, false if not
+     */
+    public boolean accept(final String path) {
+        return pattern.matcher(path).matches();
+    }
+
+    /**
+     * Get a regular expression pattern which accepts any path names which match the given glob. The glob patterns
+     * function similarly to {@code ant} file patterns. Valid metacharacters in the glob pattern include:
+     *
+     * <ul>
+     * <li>{@code "?"} - match any single non-slash character</li>
+     * <li>{@code "*"} - match zero or more non-slash characters</li>
+     * <li>{@code "**"} - match zero or more characters, including slashes</li>
+     * <li>{@code "\"} - escape the following character so it is matched literally</li>
+     * <li>{@code "/"} - match one or more slash characters</li>
+     * </ul>
+     *
+ * In addition, any glob pattern matches all subdirectories thereof. A glob pattern ending in {@code /} is equivalent + * to a glob pattern ending in /** in that the named directory is not itself included in the glob. + *
<p>
+     * See also: "Patterns" in the Ant Manual
+     *
+     * @param glob the glob to match
+     *
+     * @return the pattern
+     */
+    private static Pattern getGlobPattern(final String glob) {
+        StringBuilder patternBuilder = new StringBuilder();
+        final Matcher m = GLOB_PATTERN.matcher(glob);
+        boolean lastWasSlash = false;
+        while (m.find()) {
+            lastWasSlash = false;
+            String grp;
+            if ((grp = m.group(1)) != null) {
+                // match a * or **
+                if (grp.length() == 2) {
+                    // it's a "**"
+                    patternBuilder.append(".*");
+                } else {
+                    // it's a "*"
+                    patternBuilder.append("[^/]*");
+                }
+            } else if ((grp = m.group(2)) != null) {
+                // match a '?' glob pattern; any non-slash character
+                patternBuilder.append("[^/]");
+            } else if ((grp = m.group(3)) != null) {
+                // backslash-escaped value
+                patternBuilder.append(Pattern.quote(m.group().substring(1)));
+            } else if ((grp = m.group(4)) != null) {
+                // match any number of / chars
+                patternBuilder.append("/+");
+                lastWasSlash = true;
+            } else {
+                // some other string
+                patternBuilder.append(Pattern.quote(m.group()));
+            }
+        }
+        if (lastWasSlash) {
+            // ends in /, append **
+            patternBuilder.append(".*");
+        }
+        return Pattern.compile(patternBuilder.toString());
+    }
+}
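A hedged sketch of the matching behaviour this class produces when used from Scala; the example paths and patterns are made up for illustration:

import kamon.util.GlobPathFilter

object GlobDemo extends App {
  // "*" stays within one path segment...
  val single = new GlobPathFilter("user/*")
  println(single.accept("user/accounts"))       // true
  println(single.accept("user/accounts/alice")) // false

  // ...while "**" crosses slashes.
  val deep = new GlobPathFilter("user/**")
  println(deep.accept("user/accounts/alice"))   // true

  // "?" matches exactly one non-slash character.
  val one = new GlobPathFilter("actor-?")
  println(one.accept("actor-1"))                // true
  println(one.accept("actor-12"))               // false
}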
diff --git a/kamon-core/src/legacy-main/resources/META-INF/aop.xml b/kamon-core/src/legacy-main/resources/META-INF/aop.xml
new file mode 100644
index 00000000..b13f9aac
--- /dev/null
+++ b/kamon-core/src/legacy-main/resources/META-INF/aop.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/resources/reference.conf b/kamon-core/src/legacy-main/resources/reference.conf
new file mode 100644
index 00000000..48441493
--- /dev/null
+++ b/kamon-core/src/legacy-main/resources/reference.conf
@@ -0,0 +1,184 @@
+# ================================== #
+# Kamon-Core Reference Configuration #
+# ================================== #
+
+kamon {
+  metric {
+
+    # Time interval for collecting all metrics and sending the snapshots to all subscribed actors.
+    tick-interval = 10 seconds
+
+    # Default size for the LongBuffer that gets allocated for metrics collection and merge. The
+    # value should correspond to the highest number of different buckets with values that might
+    # exist in a single histogram during a metrics collection. The default value of 33792 is a
+    # very conservative value and it's equal to the total number of buckets required to cover values
+    # from 1 nanosecond to 1 hour with 0.1% precision (3 significant value digits). That means
+    # that you would need to have at least one measurement on every bucket of a single histogram to
+    # fully utilize this buffer, which is *really* unlikely to ever happen. Since the buffer should
+    # be allocated once and reused, it shouldn't impose a memory footprint issue.
+    default-collection-context-buffer-size = 33792
+
+    # Disables a big error message that will typically be logged if your application wasn't started
+    # with the -javaagent:/path-to-aspectj-weaver.jar option. If you are only using KamonStandalone
+    # it might be ok for you to turn this error off.
+    disable-aspectj-weaver-missing-error = false
+
+    # Specify if entities that do not match any include/exclude filter should be tracked.
+    track-unmatched-entities = yes
+
+    filters {
+      trace {
+        includes = [ "**" ]
+        excludes = [ ]
+      }
+    }
+
+    # Default instrument settings for histograms, min-max counters and gauges. The actual settings to be used when
+    # creating an instrument are determined by merging the default settings, code settings and specific instrument
+    # settings using the following priorities (top wins):
+
+    #   - any setting in `kamon.metric.instrument-settings` for the given category/instrument.
+    #   - code settings provided when creating the instrument.
+    #   - `default-instrument-settings`.
+    #
+    default-instrument-settings {
+      histogram {
+        precision = normal
+        lowest-discernible-value = 1
+        highest-trackable-value = 3600000000000
+      }
+
+      min-max-counter {
+        precision = normal
+        lowest-discernible-value = 1
+        highest-trackable-value = 999999999
+        refresh-interval = 100 milliseconds
+      }
+
+      gauge {
+        precision = normal
+        lowest-discernible-value = 1
+        highest-trackable-value = 3600000000000
+        refresh-interval = 100 milliseconds
+      }
+
+    }
+
+    # Custom configurations for category instruments. The settings provided in this section will override the default
+    # and code instrument settings as explained in the `default-instrument-settings` key. There is no need to provide
+    # full instrument settings in this section; only the settings that should be overridden must be included. Example:
+    # if you wish to change the precision and lowest discernible value of the `elapsed-time` instrument for the `trace`
+    # category, you should include the following configuration in your application.conf file:
+    #
+    #   kamon.metric.instrument-settings.trace {
+    #     elapsed-time {
+    #       precision = fine
+    #       lowest-discernible-value = 1000
+    #     }
+    #   }
+    #
+    # In this example, the value for the `highest-trackable-value` setting will be either the code setting or the default
+    # setting, depending on how the `elapsed-time` metric is created.
+    instrument-settings {
+
+    }
+  }
+
+
+  trace {
+
+    # Level of detail used when recording trace information. The possible values are:
+    #  - metrics-only: metrics for all included traces and all segments are recorded, but no Trace messages will be sent
+    #    to the subscribers of trace data.
+    #  - simple-trace: metrics for all included traces and all segments are recorded, and additionally a Trace message
+    #    containing the trace and segment details and metadata is sent.
+    level-of-detail = metrics-only
+
+    # Sampling strategy to apply when the tracing level is set to `simple-trace`. The options are: all, random, ordered,
+    # threshold and clock. The details of each sampler are below.
+    sampling = random
+
+    # Use a ThreadLocalRandom to generate numbers between 1 and 100; if the random number is less than or equal to .chance
+    # then tracing information will be gathered and reported for the current trace.
+    random-sampler {
+      chance = 10
+    }
+
+    # Use an AtomicLong to ensure that tracing information is gathered and reported once every .sample-interval
+    # requests.
+    ordered-sampler {
+      # must be power of two
+      sample-interval = 8
+    }
+
+    # Fully qualified name of the function that will be used for generating tokens to traces.
+    token-generator = kamon.trace.DefaultTokenGenerator
+
+    # Gather tracing information for all traces but only report those whose elapsed-time is equal to or greater than
+    # the .minimum-elapsed-time setting.
+    threshold-sampler {
+      minimum-elapsed-time = 1 second
+    }
+
+    # Use a FiniteDuration to record at most one trace every .pause period.
+    clock-sampler {
+      pause = 1 second
+    }
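To make the sampler settings above concrete, here is a small hypothetical sketch of overriding them from code through the Kamon.start(config) entry point added in this patch; the object name and durations are illustrative only:

import com.typesafe.config.ConfigFactory
import kamon.Kamon

object CustomSamplingSetup extends App {
  // Equivalent to putting these keys in application.conf; the explicit
  // withFallback keeps all other reference.conf defaults in place.
  val config = ConfigFactory.parseString(
    """
      |kamon.trace {
      |  level-of-detail = simple-trace
      |  sampling = threshold
      |  threshold-sampler.minimum-elapsed-time = 500 milliseconds
      |}
    """.stripMargin)
    .withFallback(ConfigFactory.load())

  Kamon.start(config)
}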
+    incubator {
+      # Minimum time to stay in the trace incubator before checking if the trace should not be incubated anymore. No
+      # checks are made at least until this period has passed.
+      min-incubation-time = 5 seconds
+
+      # Time to wait between incubation checks. After min-incubation-time, a trace is checked using this interval and if
+      # it shouldn't be incubated anymore, the TraceInfo is collected and reported for it.
+      check-interval = 1 second
+
+      # Max amount of time that a trace can be in the incubator. If this time is reached for a given trace then it will
+      # be reported with whatever information is available at the moment, logging a warning for each segment that remains
+      # open after this point.
+      max-incubation-time = 20 seconds
+    }
+  }
+
+  # All settings included under the internal-config key will be used to replace the akka.* and spray.* settings. By
+  # doing this we avoid applying custom settings that might make sense for the user application to the internal actor
+  # system and Spray facilities used by Kamon.
+  internal-config {
+
+    akka.actor {
+      provider = "local"
+      default-dispatcher {
+        fork-join-executor {
+          parallelism-min = 2
+          parallelism-factor = 2.0
+          parallelism-max = 10
+        }
+      }
+    }
+
+    spray {
+
+    }
+  }
+
+  # Controls whether the AspectJ Weaver missing warning should be displayed if any Kamon module requiring AspectJ is
+  # found in the classpath but the application is started without the AspectJ Weaver.
+  show-aspectj-missing-warning = yes
+
+  modules {
+    # Just a placeholder to ensure that the key is always available. Non-core Kamon modules should provide their
+    # settings in a module-info section.
+  }
+
+  # Add tags to all reported metrics. Can be useful to identify the source of metrics from a particular JVM instance.
+  # Example:
+  #
+  #   default-tags {
+  #     host: ${?HOSTNAME}
+  #     container-name: ${?CONTAINER_NAME}
+  #   }
+  default-tags {
+
+  }
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala
new file mode 100644
index 00000000..01dd4234
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala-2.10/kamon/ActorSystemTools.scala
@@ -0,0 +1,25 @@
+/* =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ +package kamon + +import scala.util.control.NonFatal + +import akka.actor.ActorSystem + +object ActorSystemTools { + private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { + system.shutdown() + } +} diff --git a/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala new file mode 100644 index 00000000..01dd4234 --- /dev/null +++ b/kamon-core/src/legacy-main/scala-2.11/kamon/ActorSystemTools.scala @@ -0,0 +1,25 @@ +/* ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ +package kamon + +import scala.util.control.NonFatal + +import akka.actor.ActorSystem + +object ActorSystemTools { + private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { + system.shutdown() + } +} diff --git a/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala b/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala new file mode 100644 index 00000000..762201d5 --- /dev/null +++ b/kamon-core/src/legacy-main/scala-2.12/kamon/ActorSystemTools.scala @@ -0,0 +1,25 @@ +/* ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ +package kamon + +import akka.actor.ActorSystem + +import scala.util.control.NonFatal + +object ActorSystemTools { + private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { + system.terminate() + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/Kamon.scala b/kamon-core/src/legacy-main/scala/kamon/Kamon.scala new file mode 100644 index 00000000..c02d0505 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/Kamon.scala @@ -0,0 +1,115 @@ +/* ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ +package kamon + +import _root_.akka.actor +import _root_.akka.actor._ +import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions, ConfigResolveOptions} +import kamon.metric._ +import kamon.trace.TracerModuleImpl +import kamon.util.logger.LazyLogger + +import _root_.scala.util.control.NonFatal +import _root_.scala.util.{Failure, Success, Try} + +object Kamon { + trait Extension extends actor.Extension + + @volatile private var kamonInstance = new Instance() + + def config = kamonInstance.config + def metrics = kamonInstance.metrics + def tracer = kamonInstance.tracer + + def start(): Unit = synchronized { + kamonInstance.start() + } + + def start(conf: Config): Unit = synchronized { + kamonInstance.start(conf) + } + + def shutdown(): Unit = synchronized { + kamonInstance.shutdown() + kamonInstance = new Instance() + } + + private class Instance() { + private val log = LazyLogger(classOf[Instance]) + private var actorSystem: ActorSystem = _ + + var started = false + var config: Config = defaultConfig + val metrics = MetricsModuleImpl(config) + val tracer = TracerModuleImpl(metrics, config) + + private lazy val _start = { + log.info("Initializing Kamon...") + tryLoadAutoweaveModule() + actorSystem = ActorSystem("kamon", config) + metrics.start(actorSystem, config) + tracer.start(actorSystem, config) + actorSystem.registerExtension(ModuleLoader) + started = true + } + + def start(): Unit = { + _start + } + + def start(conf: Config): Unit = { + config = patchConfiguration(conf) + _start + } + + def shutdown(): Unit = { + if (started) { + ActorSystemTools.terminateActorSystem(actorSystem) + } + } + + private def defaultConfig = { + patchConfiguration( + ConfigFactory.load( + Thread.currentThread().getContextClassLoader(), + ConfigParseOptions.defaults(), + ConfigResolveOptions.defaults().setAllowUnresolved(true) + ) + ) + } + + private def patchConfiguration(config: Config): Config = { + val internalConfig = config.getConfig("kamon.internal-config") + config + .withoutPath("akka") + .withoutPath("spray") + .withFallback(internalConfig) + } + + private def tryLoadAutoweaveModule(): Unit = { + Try { + val autoweave = Class.forName("kamon.autoweave.Autoweave") + autoweave.getDeclaredMethod("attach").invoke(autoweave.newInstance()) + } match { + case Success(_) ⇒ + val color = (msg: String) ⇒ s"""\u001B[32m${msg}\u001B[0m""" + log.info(color("Kamon-autoweave has been successfully loaded.")) + log.info(color("The AspectJ load time weaving agent is now attached to the JVM (you don't need to use -javaagent).")) + log.info(color("This offers extra flexibility but obviously any classes loaded before attachment will not be woven.")) + case Failure(NonFatal(reason)) ⇒ log.debug(s"Kamon-autoweave failed to load. 
Reason: ${reason.getMessage}.") + } + } + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala b/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala new file mode 100644 index 00000000..f1b5f414 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/ModuleLoader.scala @@ -0,0 +1,128 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon + +import _root_.akka.actor +import _root_.akka.actor._ +import kamon.util.logger.LazyLogger +import org.aspectj.lang.ProceedingJoinPoint +import org.aspectj.lang.annotation.{Around, Aspect, Pointcut} + +private[kamon] object ModuleLoader extends ExtensionId[ModuleLoaderExtension] with ExtensionIdProvider { + def lookup(): ExtensionId[_ <: actor.Extension] = ModuleLoader + def createExtension(system: ExtendedActorSystem): ModuleLoaderExtension = new ModuleLoaderExtension(system) +} + +private[kamon] class ModuleLoaderExtension(system: ExtendedActorSystem) extends Kamon.Extension { + val log = LazyLogger(getClass) + val settings = ModuleLoaderSettings(system) + + if (settings.modulesRequiringAspectJ.nonEmpty && !isAspectJPresent && settings.showAspectJMissingWarning) + logAspectJWeaverMissing(settings.modulesRequiringAspectJ) + + // Force initialization of all modules marked with auto-start. + settings.availableModules.filter(_.startInfo.nonEmpty).foreach { + case AvailableModuleInfo(name, requiresAJ, Some(ModuleStartInfo(autoStart, extensionClass))) if autoStart ⇒ + + system.dynamicAccess.getObjectFor[ExtensionId[Kamon.Extension]](extensionClass).map { moduleID ⇒ + log.debug(s"Auto starting the [$name] module.") + moduleID.get(system) + + } recover { + case th: Throwable ⇒ log.error(s"Failed to auto start the [$name] module.", th) + } + + case other ⇒ + + } + + // When AspectJ is present the kamon.supervisor.AspectJPresent aspect will make this return true. + def isAspectJPresent: Boolean = false + + def logAspectJWeaverMissing(modulesRequiringAspectJ: List[AvailableModuleInfo]): Unit = { + val moduleNames = modulesRequiringAspectJ.map(_.name).mkString(", ") + val weaverMissingMessage = + """ + | + | ___ _ ___ _ _ ___ ___ _ _ + | / _ \ | | |_ | | | | | | \/ |(_) (_) + |/ /_\ \ ___ _ __ ___ ___ | |_ | | | | | | ___ __ _ __ __ ___ _ __ | . . 
| _ ___ ___ _ _ __ __ _ + || _ |/ __|| '_ \ / _ \ / __|| __| | | | |/\| | / _ \ / _` |\ \ / // _ \| '__| | |\/| || |/ __|/ __|| || '_ \ / _` | + || | | |\__ \| |_) || __/| (__ | |_ /\__/ / \ /\ /| __/| (_| | \ V /| __/| | | | | || |\__ \\__ \| || | | || (_| | + |\_| |_/|___/| .__/ \___| \___| \__|\____/ \/ \/ \___| \__,_| \_/ \___||_| \_| |_/|_||___/|___/|_||_| |_| \__, | + | | | __/ | + | |_| |___/ + | + | It seems like your application was not started with the -javaagent:/path-to-aspectj-weaver.jar option but Kamon detected + | the following modules which require AspectJ to work properly: + | + """.stripMargin + moduleNames + + """ + | + | If you need help on setting up the aspectj weaver go to http://kamon.io/introduction/get-started/ for more info. On the + | other hand, if you are sure that you do not need or do not want to use the weaver then you can disable this error message + | by changing the kamon.show-aspectj-missing-warning setting in your configuration file. + | + """.stripMargin + + log.error(weaverMissingMessage) + } +} + +private[kamon] case class AvailableModuleInfo(name: String, requiresAspectJ: Boolean, startInfo: Option[ModuleStartInfo]) +private[kamon] case class ModuleStartInfo(autoStart: Boolean, extensionClass: String) +private[kamon] case class ModuleLoaderSettings(showAspectJMissingWarning: Boolean, availableModules: List[AvailableModuleInfo]) { + val modulesRequiringAspectJ = availableModules.filter(_.requiresAspectJ) +} + +private[kamon] object ModuleLoaderSettings { + + def apply(system: ActorSystem): ModuleLoaderSettings = { + import kamon.util.ConfigTools.Syntax + + val config = system.settings.config.getConfig("kamon.modules") + val showAspectJMissingWarning = system.settings.config.getBoolean("kamon.show-aspectj-missing-warning") + + val modules = config.firstLevelKeys + val availableModules = modules.map { moduleName ⇒ + val moduleConfig = config.getConfig(moduleName) + val requiresAspectJ = moduleConfig.getBoolean("requires-aspectj") + + val startInfo = + if (moduleConfig.hasPath("auto-start") && moduleConfig.hasPath("extension-class")) + Some(ModuleStartInfo(moduleConfig.getBoolean("auto-start"), moduleConfig.getString("extension-class"))) + else + None + + AvailableModuleInfo(moduleName, requiresAspectJ, startInfo) + + } toList + + ModuleLoaderSettings(showAspectJMissingWarning, availableModules) + } +} + +@Aspect +private[kamon] class AspectJPresent { + + @Pointcut("execution(* kamon.ModuleLoaderExtension.isAspectJPresent())") + def isAspectJPresentAtModuleSupervisor(): Unit = {} + + @Around("isAspectJPresentAtModuleSupervisor()") + def aroundIsAspectJPresentAtModuleSupervisor(pjp: ProceedingJoinPoint): Boolean = true + +} diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala b/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala new file mode 100644 index 00000000..91249af0 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/Entity.scala @@ -0,0 +1,37 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+/**
+ * Identifies a `thing` that is being monitored by Kamon. A [[kamon.metric.Entity]] is used to identify tracked `things`
+ * on both the metrics recording and reporting sides. Only the name and category fields are used when determining
+ * equality between two entities.
+ *
+ * // TODO: Find a better word for `thing`.
+ */
+case class Entity(name: String, category: String, tags: Map[String, String])
+
+object Entity {
+ def apply(name: String, category: String): Entity =
+ apply(name, category, Map.empty)
+
+ def create(name: String, category: String): Entity =
+ apply(name, category, Map.empty)
+
+ def create(name: String, category: String, tags: Map[String, String]): Entity =
+ new Entity(name, category, tags)
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala b/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala
new file mode 100644
index 00000000..e3b136dd
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/EntityRecorder.scala
@@ -0,0 +1,235 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.metric + +import kamon.metric.instrument.Gauge.CurrentValueCollector +import kamon.metric.instrument.Histogram.DynamicRange +import kamon.metric.instrument._ +import kamon.util.Function + +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.FiniteDuration + +trait EntityRecorder { + def collect(collectionContext: CollectionContext): EntitySnapshot + def cleanup: Unit +} + +trait EntityRecorderFactory[T <: EntityRecorder] { + def category: String + def createRecorder(instrumentFactory: InstrumentFactory): T +} + +abstract class EntityRecorderFactoryCompanion[T <: EntityRecorder](val category: String, builder: (InstrumentFactory) ⇒ T) + extends EntityRecorderFactory[T] { + + def createRecorder(instrumentFactory: InstrumentFactory): T = builder(instrumentFactory) +} + +object EntityRecorderFactory { + def apply[T <: EntityRecorder](entityCategory: String, factory: InstrumentFactory ⇒ T): EntityRecorderFactory[T] = + new EntityRecorderFactory[T] { + def category: String = entityCategory + def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory) + } + + def create[T <: EntityRecorder](entityCategory: String, factory: Function[InstrumentFactory, T]): EntityRecorderFactory[T] = + new EntityRecorderFactory[T] { + def category: String = entityCategory + def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory) + } +} + +private[kamon] sealed trait SingleInstrumentEntityRecorder extends EntityRecorder { + def key: MetricKey + def instrument: Instrument + + def collect(collectionContext: CollectionContext): EntitySnapshot = + new DefaultEntitySnapshot(Map(key → instrument.collect(collectionContext))) + + def cleanup: Unit = instrument.cleanup +} + +object SingleInstrumentEntityRecorder { + val Histogram = "histogram" + val MinMaxCounter = "min-max-counter" + val Gauge = "gauge" + val Counter = "counter" + + val AllCategories = List("histogram", "gauge", "counter", "min-max-counter") +} + +/** + * Entity recorder for a single Counter instrument. + */ +case class CounterRecorder(key: MetricKey, instrument: Counter) extends SingleInstrumentEntityRecorder + +/** + * Entity recorder for a single Histogram instrument. + */ +case class HistogramRecorder(key: MetricKey, instrument: Histogram) extends SingleInstrumentEntityRecorder + +/** + * Entity recorder for a single MinMaxCounter instrument. + */ +case class MinMaxCounterRecorder(key: MetricKey, instrument: MinMaxCounter) extends SingleInstrumentEntityRecorder + +/** + * Entity recorder for a single Gauge instrument. + */ +case class GaugeRecorder(key: MetricKey, instrument: Gauge) extends SingleInstrumentEntityRecorder + +/** + * Base class with plenty of utility methods to facilitate the creation of [[EntityRecorder]] implementations. + * It is not required to use this base class for defining a custom [[EntityRecorder]], but it is certainly + * the most convenient way to do it and the preferred approach throughout the Kamon codebase. 
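+ *
+ * As a rough sketch, a custom recorder could look like the following (hypothetical names;
+ * `Time.Milliseconds` is assumed to come from [[kamon.metric.instrument.Time]]):
+ *
+ * {{{
+ * class MyServiceMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+ *   val requests = counter("requests")
+ *   val activeRequests = minMaxCounter("active-requests")
+ *   val processingTime = histogram("processing-time", Time.Milliseconds)
+ * }
+ *
+ * object MyServiceMetrics extends EntityRecorderFactoryCompanion[MyServiceMetrics](
+ *   "my-service", new MyServiceMetrics(_))
+ * }}}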
+ */ +abstract class GenericEntityRecorder(instrumentFactory: InstrumentFactory) extends EntityRecorder { + import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax + + private val _instruments = TrieMap.empty[MetricKey, Instrument] + private def register[T <: Instrument](key: MetricKey, instrument: ⇒ T): T = + _instruments.atomicGetOrElseUpdate(key, instrument, _.cleanup).asInstanceOf[T] + + protected def histogram(name: String): Histogram = + register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name)) + + protected def histogram(name: String, dynamicRange: DynamicRange): Histogram = + register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name, Some(dynamicRange))) + + protected def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram = + register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name)) + + protected def histogram(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): Histogram = + register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name, Some(dynamicRange))) + + protected def removeHistogram(name: String): Unit = + _instruments.remove(HistogramKey(name, UnitOfMeasurement.Unknown)) + + protected def removeHistogram(name: String, unitOfMeasurement: UnitOfMeasurement): Unit = + _instruments.remove(HistogramKey(name, unitOfMeasurement)) + + protected def minMaxCounter(name: String): MinMaxCounter = + register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name)) + + protected def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter = + register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange))) + + protected def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter = + register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval))) + + protected def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = + register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name)) + + protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = + register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval))) + + protected def minMaxCounter(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = + register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange))) + + protected def minMaxCounter(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = + register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval))) + + protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = + register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval))) + + protected def minMaxCounter(key: MinMaxCounterKey): MinMaxCounter = + register(key, instrumentFactory.createMinMaxCounter(key.name)) + + protected def minMaxCounter(key: 
MinMaxCounterKey, dynamicRange: DynamicRange): MinMaxCounter = + register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange))) + + protected def minMaxCounter(key: MinMaxCounterKey, refreshInterval: FiniteDuration): MinMaxCounter = + register(key, instrumentFactory.createMinMaxCounter(key.name, refreshInterval = Some(refreshInterval))) + + protected def minMaxCounter(key: MinMaxCounterKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = + register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange), Some(refreshInterval))) + + protected def removeMinMaxCounter(name: String): Unit = + _instruments.remove(MinMaxCounterKey(name, UnitOfMeasurement.Unknown)) + + protected def removeMinMaxCounter(key: MinMaxCounterKey): Unit = + _instruments.remove(key) + + protected def gauge(name: String, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, valueCollector = valueCollector)) + + protected def gauge(name: String, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector)) + + protected def gauge(name: String, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) + + protected def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, valueCollector = valueCollector)) + + protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector)) + + protected def gauge(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector)) + + protected def gauge(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) + + protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = + register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector)) + + protected def gauge(key: GaugeKey, valueCollector: CurrentValueCollector): Gauge = + register(key, instrumentFactory.createGauge(key.name, valueCollector = valueCollector)) + + protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge = + register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), valueCollector = valueCollector)) + + protected def gauge(key: GaugeKey, refreshInterval: FiniteDuration, 
valueCollector: CurrentValueCollector): Gauge = + register(key, instrumentFactory.createGauge(key.name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) + + protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = + register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector)) + + protected def removeGauge(name: String): Unit = + _instruments.remove(GaugeKey(name, UnitOfMeasurement.Unknown)) + + protected def removeGauge(key: GaugeKey): Unit = + _instruments.remove(key) + + protected def counter(name: String): Counter = + register(CounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createCounter()) + + protected def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter = + register(CounterKey(name, unitOfMeasurement), instrumentFactory.createCounter()) + + protected def counter(key: CounterKey): Counter = + register(key, instrumentFactory.createCounter()) + + protected def removeCounter(name: String): Unit = + _instruments.remove(CounterKey(name, UnitOfMeasurement.Unknown)) + + protected def removeCounter(key: CounterKey): Unit = + _instruments.remove(key) + + def collect(collectionContext: CollectionContext): EntitySnapshot = { + val snapshots = Map.newBuilder[MetricKey, InstrumentSnapshot] + _instruments.foreach { + case (key, instrument) ⇒ snapshots += key → instrument.collect(collectionContext) + } + + new DefaultEntitySnapshot(snapshots.result()) + } + + def cleanup: Unit = _instruments.values.foreach(_.cleanup) +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala b/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala new file mode 100644 index 00000000..16edecd8 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/EntitySnapshot.scala @@ -0,0 +1,63 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.metric + +import kamon.metric.instrument.{Counter, Histogram, CollectionContext, InstrumentSnapshot} +import kamon.util.MapMerge +import scala.reflect.ClassTag + +trait EntitySnapshot { + def metrics: Map[MetricKey, InstrumentSnapshot] + def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot + + def histogram(name: String): Option[Histogram.Snapshot] = + find[HistogramKey, Histogram.Snapshot](name) + + def minMaxCounter(name: String): Option[Histogram.Snapshot] = + find[MinMaxCounterKey, Histogram.Snapshot](name) + + def gauge(name: String): Option[Histogram.Snapshot] = + find[GaugeKey, Histogram.Snapshot](name) + + def counter(name: String): Option[Counter.Snapshot] = + find[CounterKey, Counter.Snapshot](name) + + def histograms: Map[HistogramKey, Histogram.Snapshot] = + filterByType[HistogramKey, Histogram.Snapshot] + + def minMaxCounters: Map[MinMaxCounterKey, Histogram.Snapshot] = + filterByType[MinMaxCounterKey, Histogram.Snapshot] + + def gauges: Map[GaugeKey, Histogram.Snapshot] = + filterByType[GaugeKey, Histogram.Snapshot] + + def counters: Map[CounterKey, Counter.Snapshot] = + filterByType[CounterKey, Counter.Snapshot] + + private def filterByType[K <: MetricKey, V <: InstrumentSnapshot](implicit keyCT: ClassTag[K]): Map[K, V] = + metrics.collect { case (k, v) if keyCT.runtimeClass.isInstance(k) ⇒ (k.asInstanceOf[K], v.asInstanceOf[V]) } + + private def find[K <: MetricKey, V <: InstrumentSnapshot](name: String)(implicit keyCT: ClassTag[K]) = + metrics.find { case (k, v) ⇒ keyCT.runtimeClass.isInstance(k) && k.name == name } map (_._2.asInstanceOf[V]) +} + +class DefaultEntitySnapshot(val metrics: Map[MetricKey, InstrumentSnapshot]) extends EntitySnapshot { + import MapMerge.Syntax + + override def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot = + new DefaultEntitySnapshot(metrics.merge(that.metrics, (l, r) ⇒ l.merge(r, collectionContext))) +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala new file mode 100644 index 00000000..0d4e0163 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricKey.scala @@ -0,0 +1,47 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import kamon.metric.instrument.UnitOfMeasurement + +/** + * MetricKeys are used to identify a given metric in entity recorders and snapshots. + */ +sealed trait MetricKey { + def name: String + def unitOfMeasurement: UnitOfMeasurement +} + +/** + * MetricKey for all Histogram-based metrics. 
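+ * Note that keys of different instrument types never collide, even when they share a name: each key below is a
+ * distinct case class, so equality is structural over the concrete type, `name` and `unitOfMeasurement`.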
+ */
+private[kamon] case class HistogramKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all MinMaxCounter-based metrics.
+ */
+private[kamon] case class MinMaxCounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all Gauge-based metrics.
+ */
+private[kamon] case class GaugeKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
+
+/**
+ * MetricKey for all Counter-based metrics.
+ */
+private[kamon] case class CounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala
new file mode 100644
index 00000000..06de65ef
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricScaleDecorator.scala
@@ -0,0 +1,57 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.actor.{Actor, ActorRef, Props}
+import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
+import kamon.metric.instrument._
+
+/**
+ * Can be used as a decorator to scale TickMetricSnapshot messages to the given `timeUnits` and/or `memoryUnits`
+ * before forwarding them to the original receiver.
+ * @param timeUnits Optional time units to scale time metrics to
+ * @param memoryUnits Optional memory units to scale memory metrics to
+ * @param receiver Receiver of the scaled metrics snapshot, usually a backend sender
+ */
+class MetricScaleDecorator(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef) extends Actor {
+ require(
+ timeUnits.isDefined || memoryUnits.isDefined,
+ "Use MetricScaleDecorator only when at least one of the units is defined"
+ )
+
+ override def receive: Receive = {
+ case tick: TickMetricSnapshot ⇒
+ val scaled = tick.copy(metrics = tick.metrics.mapValues { entitySnapshot ⇒
+ new DefaultEntitySnapshot(entitySnapshot.metrics.map {
+ case (metricKey, metricSnapshot) ⇒
+ val scaledSnapshot = (metricKey.unitOfMeasurement, timeUnits, memoryUnits) match {
+ case (time: Time, Some(to), _) ⇒ metricSnapshot.scale(time, to)
+ case (memory: Memory, _, Some(to)) ⇒ metricSnapshot.scale(memory, to)
+ case _ ⇒ metricSnapshot
+ }
+ metricKey → scaledSnapshot
+ })
+ })
+ receiver forward scaled
+ }
+}
+
+object MetricScaleDecorator {
+ def props(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef): Props =
+ Props(new MetricScaleDecorator(timeUnits, memoryUnits, receiver))
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala
new file mode 100755
index 00000000..7c85bb02
--- /dev/null
+++
b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsModule.scala @@ -0,0 +1,394 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import java.util.Map.Entry + +import akka.actor._ +import com.typesafe.config.{Config, ConfigValue, ConfigValueType} +import kamon.metric.SubscriptionsDispatcher.{Subscribe, Unsubscribe} +import kamon.metric.instrument.Gauge.CurrentValueCollector +import kamon.metric.instrument.Histogram.DynamicRange +import kamon.metric.instrument._ +import kamon.util.LazyActorRef + +import scala.collection.JavaConverters._ +import scala.collection.concurrent.TrieMap +import scala.concurrent.duration.FiniteDuration + +case class EntityRegistration[T <: EntityRecorder](entity: Entity, recorder: T) + +trait MetricsModule { + def settings: MetricsSettings + + def shouldTrack(entity: Entity): Boolean + + def shouldTrack(entityName: String, category: String): Boolean = + shouldTrack(Entity(entityName, category)) + + // + // Histograms registration and removal + // + + def histogram(name: String): Histogram = + registerHistogram(name) + + def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram = + registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement)) + + def histogram(name: String, dynamicRange: DynamicRange): Histogram = + registerHistogram(name, dynamicRange = Some(dynamicRange)) + + def histogram(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram = + registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) + + def histogram(name: String, tags: Map[String, String]): Histogram = + registerHistogram(name, tags) + + def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Histogram = + registerHistogram(name, tags, Some(unitOfMeasurement)) + + def histogram(name: String, tags: Map[String, String], dynamicRange: DynamicRange): Histogram = + registerHistogram(name, tags, dynamicRange = Some(dynamicRange)) + + def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram = + registerHistogram(name, tags, Some(unitOfMeasurement), Some(dynamicRange)) + + def removeHistogram(name: String): Boolean = + removeHistogram(name, Map.empty) + + def registerHistogram(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, + dynamicRange: Option[DynamicRange] = None): Histogram + + def removeHistogram(name: String, tags: Map[String, String]): Boolean + + // + // MinMaxCounter registration and removal + // + + def minMaxCounter(name: String): MinMaxCounter = + registerMinMaxCounter(name) + + def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): 
MinMaxCounter = + registerMinMaxCounter(name, unitOfMeasurement = Some(unitOfMeasurement)) + + def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter = + registerMinMaxCounter(name, dynamicRange = Some(dynamicRange)) + + def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter = + registerMinMaxCounter(name, refreshInterval = Some(refreshInterval)) + + def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = + registerMinMaxCounter(name, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval)) + + def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter = + registerMinMaxCounter(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) + + def minMaxCounter(name: String, tags: Map[String, String]): MinMaxCounter = + registerMinMaxCounter(name, tags) + + def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = + registerMinMaxCounter(name, tags, Some(unitOfMeasurement)) + + def minMaxCounter(name: String, tags: Map[String, String], dynamicRange: DynamicRange): MinMaxCounter = + registerMinMaxCounter(name, tags, dynamicRange = Some(dynamicRange)) + + def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter = + registerMinMaxCounter(name, tags, Some(unitOfMeasurement), Some(dynamicRange)) + + def removeMinMaxCounter(name: String): Boolean = + removeMinMaxCounter(name, Map.empty) + + def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean + + def registerMinMaxCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, + dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter + + // + // Gauge registration and removal + // + + def gauge(name: String)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector) + + def gauge(name: String, unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement)) + + def gauge(name: String, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange)) + + def gauge(name: String, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, refreshInterval = Some(refreshInterval)) + + def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval)) + + def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) + + def gauge(name: String, tags: Map[String, String])(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, tags) + + def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, tags, Some(unitOfMeasurement)) + + 
def gauge(name: String, tags: Map[String, String], dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, tags, dynamicRange = Some(dynamicRange)) + + def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = + registerGauge(name, valueCollector, tags, Some(unitOfMeasurement), Some(dynamicRange)) + + def removeGauge(name: String): Boolean = + removeGauge(name, Map.empty) + + def removeGauge(name: String, tags: Map[String, String]): Boolean + + def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty, + unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None, + refreshInterval: Option[FiniteDuration] = None): Gauge + + // + // Counters registration and removal + // + + def counter(name: String): Counter = + registerCounter(name) + + def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter = + registerCounter(name, unitOfMeasurement = Some(unitOfMeasurement)) + + def counter(name: String, tags: Map[String, String]): Counter = + registerCounter(name, tags) + + def counter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Counter = + registerCounter(name, tags, Some(unitOfMeasurement)) + + def removeCounter(name: String): Boolean = + removeCounter(name, Map.empty) + + def removeCounter(name: String, tags: Map[String, String]): Boolean + + def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, + dynamicRange: Option[DynamicRange] = None): Counter + + // + // Entities registration and removal + // + + def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String): T = + entity(recorderFactory, Entity(name, recorderFactory.category)) + + def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String, tags: Map[String, String]): T = + entity(recorderFactory, Entity(name, recorderFactory.category, tags)) + + def removeEntity(name: String, category: String): Boolean = + removeEntity(Entity(name, category, Map.empty)) + + def removeEntity(name: String, category: String, tags: Map[String, String]): Boolean = + removeEntity(Entity(name, category, tags)) + + def removeEntity(entity: Entity): Boolean + + def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T + + def find(name: String, category: String): Option[EntityRecorder] = + find(Entity(name, category)) + + def find(name: String, category: String, tags: Map[String, String]): Option[EntityRecorder] = + find(Entity(name, category, tags)) + + def find(entity: Entity): Option[EntityRecorder] + + def subscribe(filter: SubscriptionFilter, subscriber: ActorRef): Unit = + subscribe(filter, subscriber, permanently = true) + + def subscribe(category: String, selection: String, subscriber: ActorRef, permanently: Boolean): Unit = + subscribe(SubscriptionFilter(category, selection), subscriber, permanently) + + def subscribe(category: String, selection: String, subscriber: ActorRef): Unit = + subscribe(SubscriptionFilter(category, selection), subscriber, permanently = true) + + def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean): Unit + + def unsubscribe(subscriber: ActorRef): Unit + + def buildDefaultCollectionContext: CollectionContext + + def instrumentFactory(category: 
String): InstrumentFactory +} + +private[kamon] class MetricsModuleImpl(config: Config) extends MetricsModule { + import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax + + private val _trackedEntities = TrieMap.empty[Entity, EntityRecorder] + private val _subscriptions = new LazyActorRef + + @volatile var settings = MetricsSettings(config) + + val defaultTags: Map[String, String] = if (config.hasPath("kamon.default-tags")) { + config.getConfig("kamon.default-tags").resolve().entrySet().asScala + .collect { + case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.STRING => + (e.getKey, e.getValue.unwrapped().asInstanceOf[String]) + case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.NUMBER => + (e.getKey, e.getValue.unwrapped().asInstanceOf[Int].toString) + case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.BOOLEAN => + (e.getKey, e.getValue.unwrapped().asInstanceOf[Boolean].toString) + }.toMap + } + else { + Map.empty + } + + def shouldTrack(entity: Entity): Boolean = + settings.entityFilters.get(entity.category).map { + filter ⇒ filter.accept(entity.name) + + } getOrElse (settings.trackUnmatchedEntities) + + def registerHistogram(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement], + dynamicRange: Option[DynamicRange]): Histogram = { + + val histogramEntity = Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags) + val recorder = _trackedEntities.atomicGetOrElseUpdate(histogramEntity, { + val factory = instrumentFactory(histogramEntity.category) + HistogramRecorder( + HistogramKey(histogramEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), + factory.createHistogram(name, dynamicRange) + ) + }, _.cleanup) + + recorder.asInstanceOf[HistogramRecorder].instrument + } + + def removeHistogram(name: String, tags: Map[String, String]): Boolean = + _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags)).isDefined + + def registerMinMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement], dynamicRange: Option[DynamicRange], + refreshInterval: Option[FiniteDuration]): MinMaxCounter = { + + val minMaxCounterEntity = Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags) + val recorder = _trackedEntities.atomicGetOrElseUpdate(minMaxCounterEntity, { + val factory = instrumentFactory(minMaxCounterEntity.category) + MinMaxCounterRecorder( + MinMaxCounterKey(minMaxCounterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), + factory.createMinMaxCounter(name, dynamicRange, refreshInterval) + ) + }, _.cleanup) + + recorder.asInstanceOf[MinMaxCounterRecorder].instrument + } + + def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean = + _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags)).isDefined + + def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty, + unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None, + refreshInterval: Option[FiniteDuration] = None): Gauge = { + + val gaugeEntity = Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags) + val recorder = _trackedEntities.atomicGetOrElseUpdate(gaugeEntity, { + val factory = instrumentFactory(gaugeEntity.category) + GaugeRecorder( + GaugeKey(gaugeEntity.category, 
unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), + factory.createGauge(name, dynamicRange, refreshInterval, valueCollector) + ) + }, _.cleanup) + + recorder.asInstanceOf[GaugeRecorder].instrument + } + + def removeGauge(name: String, tags: Map[String, String]): Boolean = + _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags)).isDefined + + def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, + dynamicRange: Option[DynamicRange] = None): Counter = { + + val counterEntity = Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags) + val recorder = _trackedEntities.atomicGetOrElseUpdate(counterEntity, { + val factory = instrumentFactory(counterEntity.category) + CounterRecorder( + CounterKey(counterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), + factory.createCounter() + ) + }, _.cleanup) + + recorder.asInstanceOf[CounterRecorder].instrument + } + + def removeCounter(name: String, tags: Map[String, String]): Boolean = + _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags)).isDefined + + def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T = { + _trackedEntities.atomicGetOrElseUpdate(entity.copy(tags = entity.tags ++ defaultTags), { + recorderFactory.createRecorder(instrumentFactory(recorderFactory.category)) + }, _.cleanup).asInstanceOf[T] + } + + def removeEntity(entity: Entity): Boolean = { + val removedEntity = _trackedEntities.remove(entity.copy(tags = entity.tags ++ defaultTags)) + removedEntity.foreach(_.cleanup) + removedEntity.isDefined + } + + def find(entity: Entity): Option[EntityRecorder] = + _trackedEntities.get(entity.copy(tags = entity.tags ++ defaultTags)) + + def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit = + _subscriptions.tell(Subscribe(filter, subscriber, permanent)) + + def unsubscribe(subscriber: ActorRef): Unit = + _subscriptions.tell(Unsubscribe(subscriber)) + + def buildDefaultCollectionContext: CollectionContext = + CollectionContext(settings.defaultCollectionContextBufferSize) + + def instrumentFactory(category: String): InstrumentFactory = + settings.instrumentFactories.getOrElse(category, settings.defaultInstrumentFactory) + + private[kamon] def collectSnapshots(collectionContext: CollectionContext): Map[Entity, EntitySnapshot] = { + val builder = Map.newBuilder[Entity, EntitySnapshot] + _trackedEntities.foreach { + case (identity, recorder) ⇒ builder += ((identity, recorder.collect(collectionContext))) + } + + builder.result() + } + + /** + * Metrics Extension initialization. 
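+ * `start` may be called more than once, but the wiring below runs only on the first call because
+ * `_start` is a lazy val; `_system` is held only while that one-time wiring executes.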
+ */ + private var _system: ActorSystem = null + private lazy val _start = { + _subscriptions.point(_system.actorOf(SubscriptionsDispatcher.props(settings.tickInterval, this), "metrics")) + settings.pointScheduler(DefaultRefreshScheduler(_system.scheduler, _system.dispatcher)) + } + + def start(system: ActorSystem, newConfig: Config): Unit = synchronized { + settings = MetricsSettings(newConfig) + _system = system + _start + _system = null + } +} + +private[kamon] object MetricsModuleImpl { + + def apply(config: Config) = + new MetricsModuleImpl(config) +} + diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala new file mode 100644 index 00000000..592e8f67 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/MetricsSettings.scala @@ -0,0 +1,123 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import com.typesafe.config.Config +import kamon.metric.instrument._ +import kamon.util.PathFilter +import kamon.util.GlobPathFilter +import kamon.util.RegexPathFilter + +import scala.concurrent.duration.FiniteDuration + +/** + * Configuration settings for the Metrics extension, as read from the `kamon.metric` configuration key. 
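+ *
+ * A minimal sketch of the keys read below (the values shown are illustrative, not the shipped defaults):
+ *
+ * {{{
+ * kamon.metric {
+ *   tick-interval = 10 seconds
+ *   default-collection-context-buffer-size = 100
+ *   track-unmatched-entities = yes
+ *   filters { }
+ *   default-instrument-settings { }
+ *   instrument-settings { }
+ * }
+ * }}}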
+ */
+case class MetricsSettings(
+ tickInterval: FiniteDuration,
+ defaultCollectionContextBufferSize: Int,
+ trackUnmatchedEntities: Boolean,
+ entityFilters: Map[String, EntityFilter],
+ instrumentFactories: Map[String, InstrumentFactory],
+ defaultInstrumentFactory: InstrumentFactory,
+ refreshScheduler: RefreshScheduler
+) {
+
+ private[kamon] def pointScheduler(targetScheduler: RefreshScheduler): Unit = refreshScheduler match {
+ case lrs: LazyRefreshScheduler ⇒ lrs.point(targetScheduler)
+ case others ⇒
+ }
+}
+
+/**
+ * Entity name filter for a single category: a name is accepted when it matches at least one of the `includes`
+ * patterns and none of the `excludes` patterns.
+ */
+case class EntityFilter(includes: List[PathFilter], excludes: List[PathFilter]) {
+ def accept(name: String): Boolean =
+ includes.exists(_.accept(name)) && !excludes.exists(_.accept(name))
+}
+
+object MetricsSettings {
+ import kamon.util.ConfigTools.Syntax
+
+ def apply(config: Config): MetricsSettings = {
+ val metricConfig = config.getConfig("kamon.metric")
+
+ val tickInterval = metricConfig.getFiniteDuration("tick-interval")
+ val collectBufferSize = metricConfig.getInt("default-collection-context-buffer-size")
+ val trackUnmatchedEntities = metricConfig.getBoolean("track-unmatched-entities")
+ val entityFilters = loadFilters(metricConfig.getConfig("filters"))
+ val defaultInstrumentSettings = DefaultInstrumentSettings.fromConfig(metricConfig.getConfig("default-instrument-settings"))
+
+ val refreshScheduler = new LazyRefreshScheduler
+ val instrumentFactories = loadInstrumentFactories(metricConfig.getConfig("instrument-settings"), defaultInstrumentSettings, refreshScheduler)
+ val defaultInstrumentFactory = new InstrumentFactory(Map.empty, defaultInstrumentSettings, refreshScheduler)
+
+ MetricsSettings(tickInterval, collectBufferSize, trackUnmatchedEntities, entityFilters, instrumentFactories,
+ defaultInstrumentFactory, refreshScheduler)
+ }
+
+ /**
+ * Loads all the default filters configured under the `kamon.metric.filters` configuration key. Each filter is
+ * defined with the entity category as a sub-key of the `kamon.metric.filters` key and two sub-keys under it,
+ * `includes` and `excludes`, holding lists of string glob or regex patterns as values ('asRegex' defaults to
+ * false). Example:
+ *
+ * {{{
+ *
+ * kamon.metric.filters {
+ * actor {
+ * includes = ["user/test-actor", "user/service/worker-*"]
+ * excludes = ["user/IO-*"]
+ * asRegex = false
+ * }
+ * }
+ *
+ * }}}
+ *
+ * @return a Map from category name to corresponding entity filter.
+ */
+ def loadFilters(filtersConfig: Config): Map[String, EntityFilter] = {
+ import scala.collection.JavaConverters._
+
+ filtersConfig.firstLevelKeys map { category: String ⇒
+ val asRegex = if (filtersConfig.hasPath(s"$category.asRegex")) filtersConfig.getBoolean(s"$category.asRegex") else false
+ val includes = filtersConfig.getStringList(s"$category.includes").asScala.map(inc ⇒
+ if (asRegex) RegexPathFilter(inc) else new GlobPathFilter(inc)).toList
+ val excludes = filtersConfig.getStringList(s"$category.excludes").asScala.map(exc ⇒
+ if (asRegex) RegexPathFilter(exc) else new GlobPathFilter(exc)).toList
+
+ (category, EntityFilter(includes, excludes))
+ } toMap
+ }
+
+ /**
+ * Loads any custom configuration settings defined under the `kamon.metric.instrument-settings` configuration key
+ * and creates InstrumentFactories for them.
+ *
+ * @return a Map from category name to InstrumentFactory.
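+ *
+ * For example, the following application.conf snippet (mirroring the example in reference.conf) yields a
+ * factory for the `trace` category whose `elapsed-time` instrument uses custom settings:
+ *
+ * {{{
+ * kamon.metric.instrument-settings.trace {
+ *   elapsed-time {
+ *     precision = fine
+ *     lowest-discernible-value = 1000
+ *   }
+ * }
+ * }}}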
+ */
+  def loadInstrumentFactories(instrumentSettings: Config, defaults: DefaultInstrumentSettings, refreshScheduler: RefreshScheduler): Map[String, InstrumentFactory] = {
+    instrumentSettings.firstLevelKeys.map { category ⇒
+      val categoryConfig = instrumentSettings.getConfig(category)
+      val customSettings = categoryConfig.firstLevelKeys.map { instrumentName ⇒
+        (instrumentName, InstrumentCustomSettings.fromConfig(categoryConfig.getConfig(instrumentName)))
+      } toMap
+
+      (category, new InstrumentFactory(customSettings, defaults, refreshScheduler))
+    } toMap
+  }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala b/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala
new file mode 100644
index 00000000..09bf58ad
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/SubscriptionsDispatcher.scala
@@ -0,0 +1,116 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.actor._
+import kamon.metric.SubscriptionsDispatcher._
+import kamon.util.{MilliTimestamp, GlobPathFilter}
+import scala.concurrent.duration.FiniteDuration
+
+/**
+ * Manages subscriptions to metrics and dispatches snapshots on every tick to all subscribers.
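+ *
+ * A sketch of how a subscription is registered, assuming `dispatcher` is an ActorRef pointing to this actor and
+ * `subscriber` is the actor that should receive the snapshots:
+ * {{{
+ *   dispatcher ! SubscriptionsDispatcher.Subscribe(SubscriptionFilter("trace", "**"), subscriber, permanently = true)
+ * }}}
+ * On every tick, matching subscribers receive a [[SubscriptionsDispatcher.TickMetricSnapshot]] message.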
+ */ +private[kamon] class SubscriptionsDispatcher(interval: FiniteDuration, metricsExtension: MetricsModuleImpl) extends Actor { + var lastTick = MilliTimestamp.now + var oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter] + var permanentSubscriptions = Map.empty[ActorRef, SubscriptionFilter] + val tickSchedule = context.system.scheduler.schedule(interval, interval, self, Tick)(context.dispatcher) + val collectionContext = metricsExtension.buildDefaultCollectionContext + + def receive = { + case Tick ⇒ processTick() + case Subscribe(filter, subscriber, permanently) ⇒ subscribe(filter, subscriber, permanently) + case Unsubscribe(subscriber) ⇒ unsubscribe(subscriber) + case Terminated(subscriber) ⇒ unsubscribe(subscriber) + } + + def processTick(): Unit = + dispatch(metricsExtension.collectSnapshots(collectionContext)) + + def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit = { + def addSubscription(storage: Map[ActorRef, SubscriptionFilter]): Map[ActorRef, SubscriptionFilter] = + storage.updated(subscriber, storage.getOrElse(subscriber, SubscriptionFilter.Empty).combine(filter)) + + context.watch(subscriber) + + if (permanent) + permanentSubscriptions = addSubscription(permanentSubscriptions) + else + oneShotSubscriptions = addSubscription(oneShotSubscriptions) + } + + def unsubscribe(subscriber: ActorRef): Unit = { + permanentSubscriptions = permanentSubscriptions - subscriber + oneShotSubscriptions = oneShotSubscriptions - subscriber + } + + def dispatch(snapshots: Map[Entity, EntitySnapshot]): Unit = { + val currentTick = MilliTimestamp.now + + dispatchSelections(lastTick, currentTick, permanentSubscriptions, snapshots) + dispatchSelections(lastTick, currentTick, oneShotSubscriptions, snapshots) + + lastTick = currentTick + oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter] + } + + def dispatchSelections(lastTick: MilliTimestamp, currentTick: MilliTimestamp, subscriptions: Map[ActorRef, SubscriptionFilter], + snapshots: Map[Entity, EntitySnapshot]): Unit = { + + for ((subscriber, filter) ← subscriptions) { + val selection = snapshots.filter(group ⇒ filter.accept(group._1)) + val tickMetrics = TickMetricSnapshot(lastTick, currentTick, selection) + + subscriber ! 
tickMetrics + } + } +} + +object SubscriptionsDispatcher { + def props(interval: FiniteDuration, metricsExtension: MetricsModuleImpl): Props = + Props(new SubscriptionsDispatcher(interval, metricsExtension)) + + case object Tick + case class Unsubscribe(subscriber: ActorRef) + case class Subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean = false) + case class TickMetricSnapshot(from: MilliTimestamp, to: MilliTimestamp, metrics: Map[Entity, EntitySnapshot]) + +} + +trait SubscriptionFilter { self ⇒ + + def accept(entity: Entity): Boolean + + final def combine(that: SubscriptionFilter): SubscriptionFilter = new SubscriptionFilter { + override def accept(entity: Entity): Boolean = self.accept(entity) || that.accept(entity) + } +} + +object SubscriptionFilter { + val Empty = new SubscriptionFilter { + def accept(entity: Entity): Boolean = false + } + + def apply(category: String, name: String): SubscriptionFilter = new SubscriptionFilter { + val categoryPattern = new GlobPathFilter(category) + val namePattern = new GlobPathFilter(name) + + def accept(entity: Entity): Boolean = { + categoryPattern.accept(entity.category) && namePattern.accept(entity.name) + } + } +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala b/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala new file mode 100644 index 00000000..22557974 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/TickMetricSnapshotBuffer.scala @@ -0,0 +1,65 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import akka.actor.{Props, Actor, ActorRef} +import kamon.Kamon +import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot +import kamon.metric.TickMetricSnapshotBuffer.FlushBuffer +import kamon.metric.instrument.CollectionContext +import kamon.util.MapMerge + +import scala.concurrent.duration.FiniteDuration + +class TickMetricSnapshotBuffer(flushInterval: FiniteDuration, receiver: ActorRef) extends Actor { + import MapMerge.Syntax + + val flushSchedule = context.system.scheduler.schedule(flushInterval, flushInterval, self, FlushBuffer)(context.dispatcher) + val collectionContext: CollectionContext = Kamon.metrics.buildDefaultCollectionContext + + def receive = empty + + def empty: Actor.Receive = { + case tick: TickMetricSnapshot ⇒ context become (buffering(tick)) + case FlushBuffer ⇒ // Nothing to flush. 
+  }
+
+  def buffering(buffered: TickMetricSnapshot): Actor.Receive = {
+    case TickMetricSnapshot(_, to, tickMetrics) ⇒
+      val combinedMetrics = buffered.metrics.merge(tickMetrics, (l, r) ⇒ l.merge(r, collectionContext))
+      val combinedSnapshot = TickMetricSnapshot(buffered.from, to, combinedMetrics)
+
+      context become (buffering(combinedSnapshot))
+
+    case FlushBuffer ⇒
+      receiver ! buffered
+      context become (empty)
+
+  }
+
+  override def postStop(): Unit = {
+    flushSchedule.cancel()
+    super.postStop()
+  }
+}
+
+object TickMetricSnapshotBuffer {
+  case object FlushBuffer
+
+  def props(flushInterval: FiniteDuration, receiver: ActorRef): Props =
+    Props[TickMetricSnapshotBuffer](new TickMetricSnapshotBuffer(flushInterval, receiver))
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala
new file mode 100644
index 00000000..eaeebb97
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/TraceMetrics.scala
@@ -0,0 +1,53 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.metric.instrument.{Time, InstrumentFactory}
+
+class TraceMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+  /**
+   * Records the elapsed time of completed traces, in nanoseconds.
+   */
+  val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds)
+  val errors = counter("errors")
+}
+
+object TraceMetrics extends EntityRecorderFactory[TraceMetrics] {
+  def category: String = "trace"
+  def createRecorder(instrumentFactory: InstrumentFactory): TraceMetrics = new TraceMetrics(instrumentFactory)
+
+  // Java API.
+  def factory: EntityRecorderFactory[TraceMetrics] = this
+}
+
+class SegmentMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+  /**
+   * Records the elapsed time of completed segments, in nanoseconds.
+   */
+  val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds)
+  val errors = counter("errors")
+}
+
+object SegmentMetrics extends EntityRecorderFactory[SegmentMetrics] {
+  def category: String = "trace-segment"
+  def createRecorder(instrumentFactory: InstrumentFactory): SegmentMetrics = new SegmentMetrics(instrumentFactory)
+
+  // Java API.
+ def factory: EntityRecorderFactory[SegmentMetrics] = this +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala new file mode 100644 index 00000000..b7ab60de --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Counter.scala @@ -0,0 +1,65 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric.instrument + +import kamon.jsr166.LongAdder + +trait Counter extends Instrument { + type SnapshotType = Counter.Snapshot + + def increment(): Unit + def increment(times: Long): Unit +} + +object Counter { + + def apply(): Counter = new LongAdderCounter + def create(): Counter = apply() + + trait Snapshot extends InstrumentSnapshot { + def count: Long + def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot + def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot + } +} + +class LongAdderCounter extends Counter { + private val counter = new LongAdder + + def increment(): Unit = counter.increment() + + def increment(times: Long): Unit = { + if (times < 0) + throw new UnsupportedOperationException("Counters cannot be decremented") + counter.add(times) + } + + def collect(context: CollectionContext): Counter.Snapshot = CounterSnapshot(counter.sumAndReset()) + + def cleanup: Unit = {} +} + +case class CounterSnapshot(count: Long) extends Counter.Snapshot { + def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot = that match { + case CounterSnapshot(thatCount) ⇒ CounterSnapshot(count + thatCount) + case other ⇒ sys.error(s"Cannot merge a CounterSnapshot with the incompatible [${other.getClass.getName}] type.") + } + + override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot = + CounterSnapshot(from.tryScale(to)(count).toLong) + +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala new file mode 100644 index 00000000..39571d3d --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Gauge.scala @@ -0,0 +1,120 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.util.concurrent.atomic.{AtomicLong, AtomicLongFieldUpdater, AtomicReference}
+
+import akka.actor.Cancellable
+import kamon.metric.instrument.Gauge.CurrentValueCollector
+import kamon.metric.instrument.Histogram.DynamicRange
+
+import scala.concurrent.duration.FiniteDuration
+
+trait Gauge extends Instrument {
+  type SnapshotType = Histogram.Snapshot
+
+  def record(value: Long): Unit
+  def record(value: Long, count: Long): Unit
+  def refreshValue(): Unit
+}
+
+object Gauge {
+
+  def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge = {
+    val underlyingHistogram = Histogram(dynamicRange)
+    val gauge = new HistogramBackedGauge(underlyingHistogram, valueCollector)
+    val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ {
+      gauge.refreshValue()
+    })
+
+    gauge.automaticValueCollectorSchedule.set(refreshValuesSchedule)
+    gauge
+  }
+
+  def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge =
+    apply(dynamicRange, refreshInterval, scheduler, valueCollector)
+
+  trait CurrentValueCollector {
+    def currentValue: Long
+  }
+
+  implicit def functionZeroAsCurrentValueCollector(f: () ⇒ Long): CurrentValueCollector = new CurrentValueCollector {
+    def currentValue: Long = f.apply()
+  }
+
+  implicit def callByNameLongAsCurrentValueCollector(f: ⇒ Long): CurrentValueCollector = new CurrentValueCollector {
+    def currentValue: Long = f
+  }
+}
+
+/**
+ * Helper for cases in which a gauge shouldn't store the current value of an observed value but rather the
+ * difference between the current observed value and the previously observed value. Should only be used if the
+ * observed value is always increasing or staying steady, but is never able to decrease.
+ *
+ * Note: The first time a value is collected this wrapper will always return zero; afterwards, the difference
+ * between the current value and the last value will be returned.
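+ *
+ * A minimal usage sketch, assuming `totalRequests` is some hypothetical, monotonically increasing counter:
+ * {{{
+ *   val requestsSinceLastCollection = DifferentialValueCollector(totalRequests.get())
+ * }}}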
+ */ +class DifferentialValueCollector(wrappedValueCollector: CurrentValueCollector) extends CurrentValueCollector { + @volatile private var _readAtLeastOnce = false + private val _lastObservedValue = new AtomicLong(0) + + def currentValue: Long = { + if (_readAtLeastOnce) { + val wrappedCurrent = wrappedValueCollector.currentValue + val diff = wrappedCurrent - _lastObservedValue.getAndSet(wrappedCurrent) + + if (diff >= 0) diff else 0L + + } else { + _lastObservedValue.set(wrappedValueCollector.currentValue) + _readAtLeastOnce = true + 0L + } + + } +} + +object DifferentialValueCollector { + def apply(wrappedValueCollector: CurrentValueCollector): CurrentValueCollector = + new DifferentialValueCollector(wrappedValueCollector) + + def apply(wrappedValueCollector: ⇒ Long): CurrentValueCollector = + new DifferentialValueCollector(new CurrentValueCollector { + def currentValue: Long = wrappedValueCollector + }) +} + +class HistogramBackedGauge(underlyingHistogram: Histogram, currentValueCollector: Gauge.CurrentValueCollector) extends Gauge { + private[kamon] val automaticValueCollectorSchedule = new AtomicReference[Cancellable]() + + def record(value: Long): Unit = underlyingHistogram.record(value) + + def record(value: Long, count: Long): Unit = underlyingHistogram.record(value, count) + + def collect(context: CollectionContext): Histogram.Snapshot = underlyingHistogram.collect(context) + + def cleanup: Unit = { + if (automaticValueCollectorSchedule.get() != null) + automaticValueCollectorSchedule.get().cancel() + } + + def refreshValue(): Unit = + underlyingHistogram.record(currentValueCollector.currentValue) + +} + diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala new file mode 100644 index 00000000..399f0880 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Histogram.scala @@ -0,0 +1,331 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric.instrument + +import java.nio.LongBuffer + +import kamon.metric.instrument.Histogram.{DynamicRange, Snapshot} +import kamon.util.logger.LazyLogger +import org.HdrHistogram.ModifiedAtomicHistogram + +trait Histogram extends Instrument { + type SnapshotType = Histogram.Snapshot + + def record(value: Long): Unit + def record(value: Long, count: Long): Unit +} + +object Histogram { + + /** + * Scala API: + * + * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given + * [[kamon.metric.instrument.Histogram.DynamicRange]]. 
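+   *
+   * A small sketch (the range values are illustrative only):
+   * {{{
+   *   val histogram = Histogram(DynamicRange(lowestDiscernibleValue = 1, highestTrackableValue = 3600000000L, precision = 2))
+   *   histogram.record(42)
+   * }}}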
+ */
+  def apply(dynamicRange: DynamicRange): Histogram = new HdrHistogram(dynamicRange)
+
+  /**
+   * Java API:
+   *
+   * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given
+   * [[kamon.metric.instrument.Histogram.DynamicRange]].
+   */
+  def create(dynamicRange: DynamicRange): Histogram = apply(dynamicRange)
+
+  /**
+   * DynamicRange is a configuration object used to supply range and precision configuration to a
+   * [[kamon.metric.instrument.HdrHistogram]]. See the [[http://hdrhistogram.github.io/HdrHistogram/ HdrHistogram website]]
+   * for more details on how it works and the effects of these configuration values.
+   *
+   * @param lowestDiscernibleValue
+   *   The lowest value that can be discerned (distinguished from 0) by the histogram. Must be a positive integer
+   *   that is >= 1. May be internally rounded down to the nearest power of 2.
+   * @param highestTrackableValue
+   *   The highest value to be tracked by the histogram. Must be a positive integer that is >= (2 * lowestDiscernibleValue).
+   *   Must not be larger than (Long.MAX_VALUE/2).
+   * @param precision
+   *   The number of significant decimal digits to which the histogram will maintain value resolution and separation.
+   *   Must be an integer between 1 and 3.
+   */
+  case class DynamicRange(lowestDiscernibleValue: Long, highestTrackableValue: Long, precision: Int)
+
+  trait Record {
+    def level: Long
+    def count: Long
+
+    private[kamon] def rawCompactRecord: Long
+  }
+
+  case class MutableRecord(var level: Long, var count: Long) extends Record {
+    var rawCompactRecord: Long = 0L
+  }
+
+  trait Snapshot extends InstrumentSnapshot {
+
+    def isEmpty: Boolean = numberOfMeasurements == 0
+    def numberOfMeasurements: Long
+    def min: Long
+    def max: Long
+    def sum: Long
+    def percentile(percentile: Double): Long
+    def recordsIterator: Iterator[Record]
+    def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot
+    def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot
+
+    override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot =
+      new ScaledSnapshot(from, to, this)
+  }
+
+  class ScaledSnapshot(from: UnitOfMeasurement, to: UnitOfMeasurement, snapshot: Snapshot) extends Snapshot {
+    private def doScale(v: Long) = from.tryScale(to)(v).toLong
+    override def numberOfMeasurements: Long = snapshot.numberOfMeasurements
+
+    override def max: Long = doScale(snapshot.max)
+
+    override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = snapshot.merge(that, context)
+
+    override def merge(that: Snapshot, context: CollectionContext): Snapshot = snapshot.merge(that, context)
+
+    override def percentile(percentile: Double): Long = doScale(snapshot.percentile(percentile))
+
+    override def min: Long = doScale(snapshot.min)
+
+    override def sum: Long = doScale(snapshot.sum)
+
+    override def recordsIterator: Iterator[Record] = {
+      snapshot.recordsIterator.map(record ⇒ new Record {
+        override def count: Long = record.count
+
+        override def level: Long = doScale(record.level)
+
+        override private[kamon] def rawCompactRecord: Long = record.rawCompactRecord
+      })
+    }
+
+    override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot =
+      if (this.from == from && this.to == to) this else super.scale(from, to)
+  }
+
+  object Snapshot {
+    val empty = new Snapshot {
+      override def min: Long = 0L
+      override def max: Long = 0L
+      override def sum: Long = 0L
+      override def percentile(percentile: Double): Long = 0L
+      override def recordsIterator: Iterator[Record] = Iterator.empty
+      override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = that
+      override def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot = that
+      override def numberOfMeasurements: Long = 0L
+      override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot = this
+    }
+  }
+}
+
+object HdrHistogram {
+  private val log = LazyLogger(classOf[HdrHistogram])
+}
+
+/**
+ * This implementation is meant to be used for real-time data collection where data snapshots are taken often over
+ * time. The collect(..) operation extracts all the recorded values from the histogram and resets the counts, but
+ * still leaves it in a consistent state even in the case of concurrent modification while the snapshot is being taken.
+ */
+class HdrHistogram(dynamicRange: DynamicRange) extends ModifiedAtomicHistogram(
+  dynamicRange.lowestDiscernibleValue,
+  dynamicRange.highestTrackableValue, dynamicRange.precision
+) with Histogram {
+  import HdrHistogram.log
+
+  def record(value: Long): Unit = tryRecord(value, 1L)
+
+  def record(value: Long, count: Long): Unit = tryRecord(value, count)
+
+  private def tryRecord(value: Long, count: Long): Unit = {
+    try {
+      recordValueWithCount(value, count)
+    } catch {
+      case anyException: Throwable ⇒
+        log.warn(s"Failed to store value $value in HdrHistogram, please review your range configuration.", anyException)
+    }
+  }
+
+  def collect(context: CollectionContext): Histogram.Snapshot = {
+    import context.buffer
+    buffer.clear()
+    val nrOfMeasurements = writeSnapshotTo(buffer)
+
+    buffer.flip()
+
+    val measurementsArray = Array.ofDim[Long](buffer.limit())
+    buffer.get(measurementsArray, 0, measurementsArray.length)
+    new CompactHdrSnapshot(nrOfMeasurements, measurementsArray, protectedUnitMagnitude(), protectedSubBucketHalfCount(), protectedSubBucketHalfCountMagnitude())
+  }
+
+  def getCounts = countsArray().length()
+
+  def cleanup: Unit = {}
+
+  private def writeSnapshotTo(buffer: LongBuffer): Long = {
+    val counts = countsArray()
+    val countsLength = counts.length()
+
+    var nrOfMeasurements = 0L
+    var index = 0L
+    while (index < countsLength) {
+      val countAtIndex = counts.getAndSet(index.toInt, 0L)
+
+      if (countAtIndex > 0) {
+        buffer.put(CompactHdrSnapshot.compactRecord(index, countAtIndex))
+        nrOfMeasurements += countAtIndex
+      }
+
+      index += 1
+    }
+    nrOfMeasurements
+  }
+}
+
+case class CompactHdrSnapshot(numberOfMeasurements: Long, compactRecords: Array[Long], unitMagnitude: Int,
+    subBucketHalfCount: Int, subBucketHalfCountMagnitude: Int) extends Histogram.Snapshot {
+
+  def min: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(0))
+  def max: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(compactRecords.length - 1))
+  def sum: Long = recordsIterator.foldLeft(0L)((a, r) ⇒ a + (r.count * r.level))
+
+  def percentile(p: Double): Long = {
+    val records = recordsIterator
+    val threshold = numberOfMeasurements * (p / 100D)
+    var countToCurrentLevel = 0L
+    var percentileLevel = 0L
+
+    while (countToCurrentLevel < threshold && records.hasNext) {
+      val record = records.next()
+      countToCurrentLevel += record.count
+      percentileLevel = record.level
+    }
+
+    percentileLevel
+  }
+
+  def merge(that: Histogram.Snapshot, context: CollectionContext): Snapshot =
+    merge(that.asInstanceOf[InstrumentSnapshot], context)
+
+  def merge(that: InstrumentSnapshot, context: CollectionContext): Histogram.Snapshot = that match {
+    case thatSnapshot: CompactHdrSnapshot ⇒
+      if (thatSnapshot.isEmpty) this else if (this.isEmpty) thatSnapshot else {
+        import context.buffer
+        buffer.clear()
+
+        val selfIterator = recordsIterator
+        val thatIterator = thatSnapshot.recordsIterator
+        var thatCurrentRecord: Histogram.Record = null
+        var mergedNumberOfMeasurements = 0L
+
+        def nextOrNull(iterator: Iterator[Histogram.Record]): Histogram.Record = if (iterator.hasNext) iterator.next() else null
+        def addToBuffer(compactRecord: Long): Unit = {
+          mergedNumberOfMeasurements += countFromCompactRecord(compactRecord)
+          buffer.put(compactRecord)
+        }
+
+        while (selfIterator.hasNext) {
+          val selfCurrentRecord = selfIterator.next()
+
+          // Advance `that` no further than the level of selfCurrentRecord.
+          thatCurrentRecord = if (thatCurrentRecord == null) nextOrNull(thatIterator) else thatCurrentRecord
+          while (thatCurrentRecord != null && thatCurrentRecord.level < selfCurrentRecord.level) {
+            addToBuffer(thatCurrentRecord.rawCompactRecord)
+            thatCurrentRecord = nextOrNull(thatIterator)
+          }
+
+          // Include the current record of self, merging it with thatCurrentRecord if both share the same level.
+          if (thatCurrentRecord != null && thatCurrentRecord.level == selfCurrentRecord.level) {
+            addToBuffer(mergeCompactRecords(thatCurrentRecord.rawCompactRecord, selfCurrentRecord.rawCompactRecord))
+            thatCurrentRecord = nextOrNull(thatIterator)
+          } else {
+            addToBuffer(selfCurrentRecord.rawCompactRecord)
+          }
+        }
+
+        // Include everything that might have been left over from `that`.
+        if (thatCurrentRecord != null) addToBuffer(thatCurrentRecord.rawCompactRecord)
+        while (thatIterator.hasNext) {
+          addToBuffer(thatIterator.next().rawCompactRecord)
+        }
+
+        buffer.flip()
+        val compactRecords = Array.ofDim[Long](buffer.limit())
+        buffer.get(compactRecords)
+
+        new CompactHdrSnapshot(mergedNumberOfMeasurements, compactRecords, unitMagnitude, subBucketHalfCount, subBucketHalfCountMagnitude)
+      }
+
+    case other ⇒
+      sys.error(s"Cannot merge a CompactHdrSnapshot with the incompatible [${other.getClass.getName}] type.")
+
+  }
+
+  @inline private def mergeCompactRecords(left: Long, right: Long): Long = {
+    val index = left >> 48
+    val leftCount = countFromCompactRecord(left)
+    val rightCount = countFromCompactRecord(right)
+
+    CompactHdrSnapshot.compactRecord(index, leftCount + rightCount)
+  }
+
+  @inline private def levelFromCompactRecord(compactRecord: Long): Long = {
+    val countsArrayIndex = (compactRecord >> 48).toInt
+    var bucketIndex: Int = (countsArrayIndex >> subBucketHalfCountMagnitude) - 1
+    var subBucketIndex: Int = (countsArrayIndex & (subBucketHalfCount - 1)) + subBucketHalfCount
+    if (bucketIndex < 0) {
+      subBucketIndex -= subBucketHalfCount
+      bucketIndex = 0
+    }
+
+    subBucketIndex.toLong << (bucketIndex + unitMagnitude)
+  }
+
+  @inline private def countFromCompactRecord(compactRecord: Long): Long =
+    compactRecord & CompactHdrSnapshot.CompactRecordCountMask
+
+  def recordsIterator: Iterator[Histogram.Record] = new Iterator[Histogram.Record] {
+    var currentIndex = 0
+    val mutableRecord = Histogram.MutableRecord(0, 0)
+
+    override def hasNext: Boolean = currentIndex < compactRecords.length
+
+    override def next(): Histogram.Record = {
+      if (hasNext) {
+        val measurement = compactRecords(currentIndex)
+        mutableRecord.rawCompactRecord = measurement
+        mutableRecord.level = levelFromCompactRecord(measurement)
+        mutableRecord.count = countFromCompactRecord(measurement)
+        currentIndex += 
1 + + mutableRecord + } else { + throw new IllegalStateException("The iterator has already been consumed.") + } + } + } +} + +object CompactHdrSnapshot { + val CompactRecordCountMask = 0xFFFFFFFFFFFFL + + def compactRecord(index: Long, count: Long): Long = (index << 48) | count +} diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala new file mode 100644 index 00000000..2c4b4319 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/Instrument.scala @@ -0,0 +1,51 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric.instrument + +import java.nio.LongBuffer + +private[kamon] trait Instrument { + type SnapshotType <: InstrumentSnapshot + + def collect(context: CollectionContext): SnapshotType + def cleanup: Unit +} + +trait InstrumentSnapshot { + def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot + + def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): InstrumentSnapshot +} + +trait CollectionContext { + def buffer: LongBuffer +} + +object CollectionContext { + def apply(longBufferSize: Int): CollectionContext = new CollectionContext { + val buffer: LongBuffer = LongBuffer.allocate(longBufferSize) + } +} + +sealed trait InstrumentType + +object InstrumentTypes { + case object Histogram extends InstrumentType + case object MinMaxCounter extends InstrumentType + case object Gauge extends InstrumentType + case object Counter extends InstrumentType +} diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala new file mode 100644 index 00000000..7c0201f7 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentFactory.scala @@ -0,0 +1,51 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.metric.instrument + +import kamon.metric.instrument.Gauge.CurrentValueCollector +import kamon.metric.instrument.Histogram.DynamicRange + +import scala.concurrent.duration.FiniteDuration + +case class InstrumentFactory(configurations: Map[String, InstrumentCustomSettings], defaults: DefaultInstrumentSettings, scheduler: RefreshScheduler) { + + private def resolveSettings(instrumentName: String, codeSettings: Option[InstrumentSettings], default: InstrumentSettings): InstrumentSettings = { + configurations.get(instrumentName).flatMap { customSettings ⇒ + codeSettings.map(cs ⇒ customSettings.combine(cs)) orElse (Some(customSettings.combine(default))) + + } getOrElse (codeSettings.getOrElse(default)) + } + + def createHistogram(name: String, dynamicRange: Option[DynamicRange] = None): Histogram = { + val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, None)), defaults.histogram) + Histogram(settings.dynamicRange) + } + + def createMinMaxCounter(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter = { + val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.minMaxCounter) + MinMaxCounter(settings.dynamicRange, settings.refreshInterval.get, scheduler) + } + + def createGauge(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None, + valueCollector: CurrentValueCollector): Gauge = { + + val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.gauge) + Gauge(settings.dynamicRange, settings.refreshInterval.get, scheduler, valueCollector) + } + + def createCounter(): Counter = Counter() +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala new file mode 100644 index 00000000..e4d6f547 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/InstrumentSettings.scala @@ -0,0 +1,73 @@ +package kamon.metric.instrument + +import com.typesafe.config.Config +import kamon.metric.instrument.Histogram.DynamicRange + +import scala.concurrent.duration.FiniteDuration + +case class InstrumentCustomSettings(lowestDiscernibleValue: Option[Long], highestTrackableValue: Option[Long], + precision: Option[Int], refreshInterval: Option[FiniteDuration]) { + + def combine(that: InstrumentSettings): InstrumentSettings = + InstrumentSettings( + DynamicRange( + lowestDiscernibleValue.getOrElse(that.dynamicRange.lowestDiscernibleValue), + highestTrackableValue.getOrElse(that.dynamicRange.highestTrackableValue), + precision.getOrElse(that.dynamicRange.precision) + ), + refreshInterval.orElse(that.refreshInterval) + ) +} + +object InstrumentCustomSettings { + import kamon.util.ConfigTools.Syntax + + def fromConfig(config: Config): InstrumentCustomSettings = + InstrumentCustomSettings( + if (config.hasPath("lowest-discernible-value")) Some(config.getLong("lowest-discernible-value")) else None, + if (config.hasPath("highest-trackable-value")) Some(config.getLong("highest-trackable-value")) else None, + if (config.hasPath("precision")) Some(InstrumentSettings.parsePrecision(config.getString("precision"))) else None, + if (config.hasPath("refresh-interval")) 
Some(config.getFiniteDuration("refresh-interval")) else None + ) + +} + +case class InstrumentSettings(dynamicRange: DynamicRange, refreshInterval: Option[FiniteDuration]) + +object InstrumentSettings { + + def readDynamicRange(config: Config): DynamicRange = + DynamicRange( + config.getLong("lowest-discernible-value"), + config.getLong("highest-trackable-value"), + parsePrecision(config.getString("precision")) + ) + + def parsePrecision(stringValue: String): Int = stringValue match { + case "low" ⇒ 1 + case "normal" ⇒ 2 + case "fine" ⇒ 3 + case other ⇒ sys.error(s"Invalid precision configuration [$other] found, valid options are: [low|normal|fine].") + } +} + +case class DefaultInstrumentSettings(histogram: InstrumentSettings, minMaxCounter: InstrumentSettings, gauge: InstrumentSettings) + +object DefaultInstrumentSettings { + + def fromConfig(config: Config): DefaultInstrumentSettings = { + import kamon.util.ConfigTools.Syntax + + val histogramSettings = InstrumentSettings(InstrumentSettings.readDynamicRange(config.getConfig("histogram")), None) + val minMaxCounterSettings = InstrumentSettings( + InstrumentSettings.readDynamicRange(config.getConfig("min-max-counter")), + Some(config.getFiniteDuration("min-max-counter.refresh-interval")) + ) + val gaugeSettings = InstrumentSettings( + InstrumentSettings.readDynamicRange(config.getConfig("gauge")), + Some(config.getFiniteDuration("gauge.refresh-interval")) + ) + + DefaultInstrumentSettings(histogramSettings, minMaxCounterSettings, gaugeSettings) + } +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala new file mode 100644 index 00000000..76fc2c2a --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/MinMaxCounter.scala @@ -0,0 +1,105 @@ +package kamon.metric.instrument + +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +import java.lang.Math.abs +import java.util.concurrent.atomic.AtomicReference +import akka.actor.Cancellable +import kamon.jsr166.LongMaxUpdater +import kamon.metric.instrument.Histogram.DynamicRange +import kamon.util.PaddedAtomicLong +import scala.concurrent.duration.FiniteDuration + +trait MinMaxCounter extends Instrument { + override type SnapshotType = Histogram.Snapshot + + def increment(): Unit + def increment(times: Long): Unit + def decrement(): Unit + def decrement(times: Long): Unit + def refreshValues(): Unit +} + +object MinMaxCounter { + + def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter = { + val underlyingHistogram = Histogram(dynamicRange) + val minMaxCounter = new PaddedMinMaxCounter(underlyingHistogram) + val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ { + minMaxCounter.refreshValues() + }) + + minMaxCounter.refreshValuesSchedule.set(refreshValuesSchedule) + minMaxCounter + } + + def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter = + apply(dynamicRange, refreshInterval, scheduler) + +} + +class PaddedMinMaxCounter(underlyingHistogram: Histogram) extends MinMaxCounter { + private val min = new LongMaxUpdater(0L) + private val max = new LongMaxUpdater(0L) + private val sum = new PaddedAtomicLong + val refreshValuesSchedule = new AtomicReference[Cancellable]() + + def increment(): Unit = increment(1L) + + def increment(times: Long): Unit = { + val currentValue = sum.addAndGet(times) + max.update(currentValue) + } + + def decrement(): Unit = decrement(1L) + + def decrement(times: Long): Unit = { + val currentValue = sum.addAndGet(-times) + min.update(-currentValue) + } + + def collect(context: CollectionContext): Histogram.Snapshot = { + refreshValues() + underlyingHistogram.collect(context) + } + + def cleanup: Unit = { + if (refreshValuesSchedule.get() != null) + refreshValuesSchedule.get().cancel() + } + + def refreshValues(): Unit = { + val currentValue = { + val value = sum.get() + if (value <= 0) 0 else value + } + + val currentMin = { + val rawMin = min.maxThenReset(-currentValue) + if (rawMin >= 0) + 0 + else + abs(rawMin) + } + + val currentMax = max.maxThenReset(currentValue) + + underlyingHistogram.record(currentValue) + underlyingHistogram.record(currentMin) + underlyingHistogram.record(currentMax) + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala new file mode 100644 index 00000000..eb01d114 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala @@ -0,0 +1,31 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package org.HdrHistogram + +import java.util.concurrent.atomic.AtomicLongArray + +abstract class ModifiedAtomicHistogram(low: Long, high: Long, precision: Int) + extends AtomicHistogram(low, high, precision) { self ⇒ + + override def incrementTotalCount(): Unit = {} + override def addToTotalCount(value: Long): Unit = {} + + def countsArray(): AtomicLongArray = counts + def protectedUnitMagnitude(): Int = unitMagnitude + def protectedSubBucketHalfCount(): Int = subBucketHalfCount + def protectedSubBucketHalfCountMagnitude(): Int = subBucketHalfCountMagnitude +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala new file mode 100644 index 00000000..6bc02dc3 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/RefreshScheduler.scala @@ -0,0 +1,115 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric.instrument + +import akka.actor.{Scheduler, Cancellable} +import org.HdrHistogram.WriterReaderPhaser + +import scala.collection.concurrent.TrieMap +import scala.concurrent.ExecutionContext +import scala.concurrent.duration.FiniteDuration + +trait RefreshScheduler { + def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable +} + +/** + * Default implementation of RefreshScheduler that simply uses an [[akka.actor.Scheduler]] to schedule tasks to be run + * in the provided ExecutionContext. + */ +class DefaultRefreshScheduler(scheduler: Scheduler, dispatcher: ExecutionContext) extends RefreshScheduler { + def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable = + scheduler.schedule(interval, interval)(refresh.apply())(dispatcher) +} + +object DefaultRefreshScheduler { + def apply(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler = + new DefaultRefreshScheduler(scheduler, dispatcher) + + def create(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler = + apply(scheduler, dispatcher) +} + +/** + * RefreshScheduler implementation that accumulates all the scheduled actions until it is pointed to another refresh + * scheduler. Once it is pointed, all subsequent calls to `schedule` will immediately be scheduled in the pointed + * scheduler. 
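+ *
+ * Sketch of the intended lifecycle (`system` is an assumed ActorSystem and `refreshInstrument()` a hypothetical
+ * refresh action):
+ * {{{
+ *   val lazyScheduler = new LazyRefreshScheduler
+ *   lazyScheduler.schedule(100.millis, () ⇒ refreshInstrument())                      // buffered in the backlog
+ *   lazyScheduler.point(DefaultRefreshScheduler(system.scheduler, system.dispatcher)) // backlog gets scheduled
+ * }}}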
+ */
+class LazyRefreshScheduler extends RefreshScheduler {
+  private val _schedulerPhaser = new WriterReaderPhaser
+  private val _backlog = new TrieMap[(FiniteDuration, () ⇒ Unit), RepointableCancellable]()
+  @volatile private var _target: Option[RefreshScheduler] = None
+
+  def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable = {
+    val criticalEnter = _schedulerPhaser.writerCriticalSectionEnter()
+    try {
+      _target.map { scheduler ⇒
+        scheduler.schedule(interval, refresh)
+
+      } getOrElse {
+        val entry = (interval, refresh)
+        val cancellable = new RepointableCancellable(entry)
+
+        _backlog.put(entry, cancellable)
+        cancellable
+      }
+
+    } finally {
+      _schedulerPhaser.writerCriticalSectionExit(criticalEnter)
+    }
+  }
+
+  def point(target: RefreshScheduler): Unit = try {
+    _schedulerPhaser.readerLock()
+
+    if (_target.isEmpty) {
+      _target = Some(target)
+      _schedulerPhaser.flipPhase(10000L)
+      _backlog.dropWhile {
+        case ((interval, refresh), repointableCancellable) ⇒
+          repointableCancellable.point(target.schedule(interval, refresh))
+          true
+      }
+    } else sys.error("A LazyRefreshScheduler cannot be pointed more than once.")
+  } finally { _schedulerPhaser.readerUnlock() }
+
+  class RepointableCancellable(entry: (FiniteDuration, () ⇒ Unit)) extends Cancellable {
+    private var _isCancelled = false
+    private var _cancellable: Option[Cancellable] = None
+
+    def isCancelled: Boolean = synchronized {
+      _cancellable.map(_.isCancelled).getOrElse(_isCancelled)
+    }
+
+    def cancel(): Boolean = synchronized {
+      _isCancelled = true
+      _cancellable.map(_.cancel()).getOrElse(_backlog.remove(entry).nonEmpty)
+    }
+
+    def point(cancellable: Cancellable): Unit = synchronized {
+      if (_cancellable.isEmpty) {
+        _cancellable = Some(cancellable)
+
+        if (_isCancelled)
+          cancellable.cancel()
+
+      } else sys.error("A RepointableCancellable cannot be pointed more than once.")
+
+    }
+  }
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala
new file mode 100644
index 00000000..5952b906
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/metric/instrument/UnitOfMeasurement.scala
@@ -0,0 +1,109 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+/**
+ * A UnitOfMeasurement implementation describes the magnitude of a quantity being measured, such as Time and computer
+ * Memory space. Kamon uses UnitOfMeasurement implementations just as an informative companion to metrics inside
+ * entity recorders and they might be used to scale certain kinds of measurements in metric backends.
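+ *
+ * For example, using the [[Time]] implementation below, five seconds scaled to milliseconds:
+ * {{{
+ *   Time.Seconds.scale(Time.Milliseconds)(5.0) // == 5000.0
+ * }}}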
+ */ +trait UnitOfMeasurement { + type U <: UnitOfMeasurement + + def name: String + def label: String + def scale(toUnit: U)(value: Double): Double = value + + def tryScale(toUnit: UnitOfMeasurement)(value: Double): Double = + if (canScale(toUnit)) scale(toUnit.asInstanceOf[U])(value) + else throw new IllegalArgumentException(s"Can't scale different types of units `$name` and `${toUnit.name}`") + + protected def canScale(toUnit: UnitOfMeasurement): Boolean + +} + +object UnitOfMeasurement { + case object Unknown extends UnitOfMeasurement { + override type U = Unknown.type + val name = "unknown" + val label = "unknown" + + override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isUnknown(toUnit) + } + + def isUnknown(uom: UnitOfMeasurement): Boolean = + uom == Unknown + + def isTime(uom: UnitOfMeasurement): Boolean = + uom.isInstanceOf[Time] + + def isMemory(uom: UnitOfMeasurement): Boolean = + uom.isInstanceOf[Memory] + +} + +/** + * UnitOfMeasurement representing time. + */ +case class Time(factor: Double, label: String) extends UnitOfMeasurement { + override type U = Time + val name = "time" + + override def scale(toUnit: Time)(value: Double): Double = + (value * factor) / toUnit.factor + + override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isTime(toUnit) +} + +object Time { + val Nanoseconds = Time(1E-9, "n") + val Microseconds = Time(1E-6, "µs") + val Milliseconds = Time(1E-3, "ms") + val Seconds = Time(1, "s") + + val units = List(Nanoseconds, Microseconds, Milliseconds, Seconds) + + def apply(time: String): Time = units.find(_.label.toLowerCase == time.toLowerCase) getOrElse { + throw new IllegalArgumentException(s"Can't recognize time unit '$time'") + } +} + +/** + * UnitOfMeasurement representing computer memory space. + */ +case class Memory(factor: Double, label: String) extends UnitOfMeasurement { + override type U = Memory + val name = "bytes" + + override def scale(toUnit: Memory)(value: Double): Double = + (value * factor) / toUnit.factor + + override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isMemory(toUnit) +} + +object Memory { + val Bytes = Memory(1, "b") + val KiloBytes = Memory(1024, "Kb") + val MegaBytes = Memory(1024 * 1024, "Mb") + val GigaBytes = Memory(1024 * 1024 * 1024, "Gb") + + val units = List(Bytes, KiloBytes, MegaBytes, GigaBytes) + + def apply(memory: String): Memory = units.find(_.label.toLowerCase == memory.toLowerCase) getOrElse { + throw new IllegalArgumentException(s"Can't recognize memory unit '$memory'") + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala b/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala new file mode 100644 index 00000000..fe5ad569 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/trace/Incubator.scala @@ -0,0 +1,94 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import akka.actor.{ActorLogging, Props, Actor, ActorRef} +import kamon.trace.Incubator.{CheckForCompletedTraces, IncubatingTrace} +import kamon.util.{NanoInterval, RelativeNanoTimestamp} +import scala.annotation.tailrec +import scala.collection.immutable.Queue +import kamon.util.ConfigTools.Syntax + +class Incubator(subscriptions: ActorRef) extends Actor with ActorLogging { + import context.dispatcher + val config = context.system.settings.config.getConfig("kamon.trace.incubator") + + val minIncubationTime = new NanoInterval(config.getFiniteDuration("min-incubation-time").toNanos) + val maxIncubationTime = new NanoInterval(config.getFiniteDuration("max-incubation-time").toNanos) + val checkInterval = config.getFiniteDuration("check-interval") + + val checkSchedule = context.system.scheduler.schedule(checkInterval, checkInterval, self, CheckForCompletedTraces) + var waitingForMinimumIncubation = Queue.empty[IncubatingTrace] + var waitingForIncubationFinish = List.empty[IncubatingTrace] + + def receive = { + case tc: TracingContext ⇒ incubate(tc) + case CheckForCompletedTraces ⇒ + checkWaitingForMinimumIncubation() + checkWaitingForIncubationFinish() + } + + def incubate(tc: TracingContext): Unit = + waitingForMinimumIncubation = waitingForMinimumIncubation.enqueue(IncubatingTrace(tc, RelativeNanoTimestamp.now)) + + @tailrec private def checkWaitingForMinimumIncubation(): Unit = { + if (waitingForMinimumIncubation.nonEmpty) { + val it = waitingForMinimumIncubation.head + if (NanoInterval.since(it.incubationStart) >= minIncubationTime) { + waitingForMinimumIncubation = waitingForMinimumIncubation.tail + + if (it.tc.shouldIncubate) + waitingForIncubationFinish = it :: waitingForIncubationFinish + else + dispatchTraceInfo(it.tc) + + checkWaitingForMinimumIncubation() + } + } + } + + private def checkWaitingForIncubationFinish(): Unit = { + waitingForIncubationFinish = waitingForIncubationFinish.filter { + case IncubatingTrace(context, incubationStart) ⇒ + if (!context.shouldIncubate) { + dispatchTraceInfo(context) + false + } else { + if (NanoInterval.since(incubationStart) >= maxIncubationTime) { + log.warning("Trace [{}] with token [{}] has reached the maximum incubation time, will be reported as is.", context.name, context.token) + dispatchTraceInfo(context); + false + } else true + } + } + } + + def dispatchTraceInfo(tc: TracingContext): Unit = subscriptions ! 
tc.generateTraceInfo + + override def postStop(): Unit = { + super.postStop() + checkSchedule.cancel() + } +} + +object Incubator { + + def props(subscriptions: ActorRef): Props = Props(new Incubator(subscriptions)) + + case object CheckForCompletedTraces + case class IncubatingTrace(tc: TracingContext, incubationStart: RelativeNanoTimestamp) +} diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala new file mode 100644 index 00000000..bc0bedba --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/trace/MetricsOnlyContext.scala @@ -0,0 +1,186 @@ +/* + * ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import java.util.concurrent.ConcurrentLinkedQueue + +import akka.event.LoggingAdapter +import kamon.Kamon +import kamon.metric.{SegmentMetrics, TraceMetrics} +import kamon.util.{NanoInterval, RelativeNanoTimestamp} + +import scala.annotation.tailrec +import scala.collection.concurrent.TrieMap + +private[kamon] class MetricsOnlyContext( + traceName: String, + val token: String, + traceTags: Map[String, String], + currentStatus: Status, + val levelOfDetail: LevelOfDetail, + val startTimestamp: RelativeNanoTimestamp, + log: LoggingAdapter +) extends TraceContext { + + @volatile private var _name = traceName + @volatile private var _status = currentStatus + @volatile protected var _elapsedTime = NanoInterval.default + + private val _finishedSegments = new ConcurrentLinkedQueue[SegmentLatencyData]() + private val _traceLocalStorage = new TraceLocalStorage + private val _tags = TrieMap.empty[String, String] ++= traceTags + + def rename(newName: String): Unit = + if (Status.Open == status) + _name = newName + else + log.warning("Can't rename trace from [{}] to [{}] because the trace is already closed.", name, newName) + + def name: String = _name + def tags: Map[String, String] = _tags.toMap + def isEmpty: Boolean = false + def status: Status = _status + def addMetadata(key: String, value: String): Unit = {} + def addTag(key: String, value: String): Unit = _tags.put(key, value) + def removeTag(key: String, value: String): Boolean = _tags.remove(key, value) + + private def finish(withError: Boolean): Unit = { + _status = if (withError) Status.FinishedWithError else Status.FinishedSuccessfully + val traceElapsedTime = NanoInterval.since(startTimestamp) + _elapsedTime = traceElapsedTime + + if (Kamon.metrics.shouldTrack(name, TraceMetrics.category)) { + val traceEntity = Kamon.metrics.entity(TraceMetrics, name, _tags.toMap) + traceEntity.elapsedTime.record(traceElapsedTime.nanos) + if (withError) traceEntity.errors.increment() + } + drainFinishedSegments() + } + + def finish(): Unit = finish(withError = false) + + def finishWithError(cause: 
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala b/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala
new file mode 100644
index 00000000..234c67bb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/Sampler.scala
@@ -0,0 +1,101 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.net.InetAddress
+import java.util.concurrent.atomic.AtomicLong
+import kamon.util.{NanoTimestamp, NanoInterval, Sequencer}
+import scala.concurrent.forkjoin.ThreadLocalRandom
+
+import scala.util.Try
+
+trait Sampler {
+  def shouldTrace: Boolean
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean
+}
+
+object NoSampling extends Sampler {
+  def shouldTrace: Boolean = false
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = false
+}
+
+object SampleAll extends Sampler {
+  def shouldTrace: Boolean = true
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+class RandomSampler(chance: Int) extends Sampler {
+  require(chance > 0, "kamon.trace.random-sampler.chance cannot be <= 0")
+  require(chance <= 100, "kamon.trace.random-sampler.chance cannot be > 100")
+
+  def shouldTrace: Boolean = ThreadLocalRandom.current().nextInt(100) < chance
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+class OrderedSampler(interval: Int) extends Sampler {
+  import OrderedSampler._
+
+  require(interval > 0, "kamon.trace.ordered-sampler.sample-interval cannot be <= 0")
+  assume(interval.isPowerOfTwo, "kamon.trace.ordered-sampler.sample-interval must be a power of two")
+
+  private val sequencer = Sequencer()
+
+  def shouldTrace: Boolean = (sequencer.next() fastMod interval) == 0
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = true
+}
+
+object OrderedSampler {
+  implicit class EnhancedInt(i: Int) {
+    def isPowerOfTwo = (i & (i - 1)) == 0
+  }
+
+  implicit class EnhancedLong(dividend: Long) {
+    def fastMod(divisor: Int) = dividend & (divisor - 1)
+  }
+}
+
+class ThresholdSampler(thresholdInNanoseconds: NanoInterval) extends Sampler {
+  require(thresholdInNanoseconds.nanos > 0, "kamon.trace.threshold-sampler.minimum-elapsed-time cannot be <= 0")
+
+  def shouldTrace: Boolean = true
+  def shouldReport(traceElapsedTime: NanoInterval): Boolean = traceElapsedTime >= thresholdInNanoseconds
+}
+
+class ClockSampler(pauseInNanoseconds: NanoInterval) extends Sampler {
+  require(pauseInNanoseconds.nanos > 0, "kamon.trace.clock-sampler.pause
cannot be <= 0") + + private val timer: AtomicLong = new AtomicLong(0L) + + def shouldTrace: Boolean = { + val now = NanoTimestamp.now.nanos + val lastTimer = timer.get() + if ((lastTimer + pauseInNanoseconds.nanos) < now) + timer.compareAndSet(lastTimer, now) + else + false + } + def shouldReport(traceElapsedTime: NanoInterval): Boolean = true +} + +class DefaultTokenGenerator extends Function0[String] { + private val _hostnamePrefix = Try(InetAddress.getLocalHost.getHostName).getOrElse("unknown-localhost") + private val _tokenCounter = new AtomicLong + + def apply(): String = { + _hostnamePrefix + "-" + String.valueOf(_tokenCounter.incrementAndGet()) + } +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala new file mode 100644 index 00000000..bbf40d8d --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceContext.scala @@ -0,0 +1,202 @@ +/* + * ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import java.io.ObjectStreamException +import java.util + +import kamon.trace.Status.Closed +import kamon.trace.TraceContextAware.DefaultTraceContextAware +import kamon.util.{Function, RelativeNanoTimestamp, SameThreadExecutionContext, Supplier} + +import scala.concurrent.Future + +trait TraceContext { + def name: String + def token: String + def tags: Map[String, String] + def isEmpty: Boolean + def nonEmpty: Boolean = !isEmpty + def isClosed: Boolean = !(Status.Open == status) + def status: Status + def finish(): Unit + def finishWithError(cause: Throwable): Unit + def rename(newName: String): Unit + def startSegment(segmentName: String, category: String, library: String): Segment + def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment + def addMetadata(key: String, value: String): Unit + def addTag(key: String, value: String): Unit + def removeTag(key: String, value: String): Boolean + def startTimestamp: RelativeNanoTimestamp + + def collect[T](f: TraceContext ⇒ T): Option[T] = + if (nonEmpty) + Some(f(this)) + else None + + def collect[T](f: Function[TraceContext, T]): Option[T] = + if (nonEmpty) + Some(f(this)) + else None + + def withNewSegment[T](segmentName: String, category: String, library: String)(code: ⇒ T): T = { + withNewSegment(segmentName, category, library, Map.empty[String, String])(code) + } + + def withNewSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ T): T = { + val segment = startSegment(segmentName, category, library, tags) + try code finally segment.finish() + } + + def withNewAsyncSegment[T](segmentName: String, category: String, library: String)(code: ⇒ Future[T]): Future[T] = { 
+ withNewAsyncSegment(segmentName, category, library, Map.empty[String, String])(code) + } + + def withNewAsyncSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ Future[T]): Future[T] = { + val segment = startSegment(segmentName, category, library, tags) + val result = code + result.onComplete(_ ⇒ segment.finish())(SameThreadExecutionContext) + result + } + + // Java variant. + def withNewSegment[T](segmentName: String, category: String, library: String, code: Supplier[T]): T = + withNewSegment(segmentName, category, library)(code.get) + + def withNewSegment[T](segmentName: String, category: String, library: String, tags: util.Map[String, String], code: Supplier[T]): T = { + import scala.collection.JavaConverters._ + withNewSegment(segmentName, category, library, tags.asScala.toMap)(code.get) + } +} + +trait Segment { + def name: String + def category: String + def library: String + def tags: Map[String, String] + def isEmpty: Boolean + def nonEmpty: Boolean = !isEmpty + def isClosed: Boolean = !(Status.Open == status) + def status: Status + def finish(): Unit + def finishWithError(cause: Throwable): Unit + def rename(newName: String): Unit + def addMetadata(key: String, value: String): Unit + def addTag(key: String, value: String): Unit + def removeTag(key: String, value: String): Boolean +} + +case object EmptyTraceContext extends TraceContext { + def name: String = "empty-trace" + def token: String = "" + def tags: Map[String, String] = Map.empty + def isEmpty: Boolean = true + def status: Status = Closed + def finish(): Unit = {} + def finishWithError(cause: Throwable): Unit = {} + def rename(name: String): Unit = {} + def startSegment(segmentName: String, category: String, library: String): Segment = EmptySegment + def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = EmptySegment + def addMetadata(key: String, value: String): Unit = {} + def startTimestamp = new RelativeNanoTimestamp(0L) + def addTag(key: String, value: String): Unit = {} + def removeTag(key: String, value: String): Boolean = false + + case object EmptySegment extends Segment { + val name: String = "empty-segment" + val category: String = "empty-category" + val library: String = "empty-library" + def tags: Map[String, String] = Map.empty + def isEmpty: Boolean = true + def status: Status = Closed + def finish(): Unit = {} + def finishWithError(cause: Throwable): Unit = {} + def rename(newName: String): Unit = {} + def addMetadata(key: String, value: String): Unit = {} + def addTag(key: String, value: String): Unit = {} + def removeTag(key: String, value: String): Boolean = false + } +} + +object SegmentCategory { + val HttpClient = "http-client" + val Database = "database" +} + +class LOD private[trace] (val level: Int) extends AnyVal +object LOD { + val MetricsOnly = new LOD(1) + val SimpleTrace = new LOD(2) +} + +sealed trait LevelOfDetail +object LevelOfDetail { + case object MetricsOnly extends LevelOfDetail + case object SimpleTrace extends LevelOfDetail + case object FullTrace extends LevelOfDetail +} + +sealed trait Status +object Status { + case object Open extends Status + case object Closed extends Status + case object FinishedWithError extends Status + case object FinishedSuccessfully extends Status +} + +trait TraceContextAware extends Serializable { + def traceContext: TraceContext +} + +object TraceContextAware { + def default: TraceContextAware = new DefaultTraceContextAware + + class 
DefaultTraceContextAware extends TraceContextAware { + @transient val traceContext = Tracer.currentContext + + // + // Beware of this hack, it might bite us in the future! + // + // When using remoting/cluster all messages carry the TraceContext in the envelope in which they + // are sent but that doesn't apply to System Messages. We are certain that the TraceContext is + // available (if any) when the system messages are read and this will make sure that it is correctly + // captured and propagated. + @throws[ObjectStreamException] + private def readResolve: AnyRef = { + new DefaultTraceContextAware + } + } +} + +trait TimestampedTraceContextAware extends TraceContextAware { + def captureNanoTime: Long +} + +object TimestampedTraceContextAware { + def default: TimestampedTraceContextAware = new DefaultTraceContextAware with TimestampedTraceContextAware { + @transient val captureNanoTime = System.nanoTime() + } +} + +trait SegmentAware { + @volatile @transient var segment: Segment = EmptyTraceContext.EmptySegment +} + +object SegmentAware { + def default: SegmentAware = new DefaultSegmentAware + class DefaultSegmentAware extends DefaultTraceContextAware with SegmentAware {} +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala new file mode 100644 index 00000000..460e4b22 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceLocal.scala @@ -0,0 +1,64 @@ +/* + * ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import kamon.trace.TraceLocal.TraceLocalKey +import kamon.util.Supplier +import scala.collection.concurrent.TrieMap + +object TraceLocal { + + trait TraceLocalKey[T] + + trait AvailableToMdc extends TraceLocalKey[String] { + def mdcKey: String + } + + object AvailableToMdc { + case class DefaultKeyAvailableToMdc(mdcKey: String) extends AvailableToMdc + + def fromKey(mdcKey: String): AvailableToMdc = DefaultKeyAvailableToMdc(mdcKey) + def apply(mdcKey: String): AvailableToMdc = fromKey(mdcKey) + } + + def store[T](key: TraceLocalKey[T])(value: Any): Unit = Tracer.currentContext match { + case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.store(key)(value) + case EmptyTraceContext ⇒ // Can't store in the empty context. + } + + def retrieve[T](key: TraceLocalKey[T]): Option[T] = Tracer.currentContext match { + case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.retrieve(key) + case EmptyTraceContext ⇒ None // Can't retrieve anything from the empty context. 
+ } + + // Java variant + @throws(classOf[NoSuchElementException]) + def get[T](key: TraceLocalKey[T]): T = retrieve(key).get + + def getOrElse[T](key: TraceLocalKey[T], code: Supplier[T]): T = retrieve(key).getOrElse(code.get) + + def storeForMdc(key: String, value: String): Unit = store(AvailableToMdc.fromKey(key))(value) + + def newTraceLocalKey[T]: TraceLocalKey[T] = new TraceLocalKey[T] {} +} + +class TraceLocalStorage { + val underlyingStorage = TrieMap[TraceLocalKey[_], Any]() + + def store[T](key: TraceLocalKey[T])(value: Any): Unit = underlyingStorage.put(key, value) + def retrieve[T](key: TraceLocalKey[T]): Option[T] = underlyingStorage.get(key).map(_.asInstanceOf[T]) +} diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala new file mode 100644 index 00000000..c3a83e93 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSettings.scala @@ -0,0 +1,51 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import kamon.util.ConfigTools.Syntax +import com.typesafe.config.Config +import kamon.util.NanoInterval + +case class TraceSettings(levelOfDetail: LevelOfDetail, sampler: Sampler, tokenGenerator: () ⇒ String) + +object TraceSettings { + def apply(config: Config): TraceSettings = { + val tracerConfig = config.getConfig("kamon.trace") + + val detailLevel: LevelOfDetail = tracerConfig.getString("level-of-detail") match { + case "metrics-only" ⇒ LevelOfDetail.MetricsOnly + case "simple-trace" ⇒ LevelOfDetail.SimpleTrace + case other ⇒ sys.error(s"Unknown tracer level of detail [$other] present in the configuration file.") + } + + val sampler: Sampler = + if (detailLevel == LevelOfDetail.MetricsOnly) NoSampling + else tracerConfig.getString("sampling") match { + case "all" ⇒ SampleAll + case "random" ⇒ new RandomSampler(tracerConfig.getInt("random-sampler.chance")) + case "ordered" ⇒ new OrderedSampler(tracerConfig.getInt("ordered-sampler.sample-interval")) + case "threshold" ⇒ new ThresholdSampler(new NanoInterval(tracerConfig.getFiniteDuration("threshold-sampler.minimum-elapsed-time").toNanos)) + case "clock" ⇒ new ClockSampler(new NanoInterval(tracerConfig.getFiniteDuration("clock-sampler.pause").toNanos)) + } + + val dynamic = new akka.actor.ReflectiveDynamicAccess(getClass.getClassLoader) + val tokenGeneratorFQN = tracerConfig.getString("token-generator") + val tokenGenerator = dynamic.createInstanceFor[() ⇒ String](tokenGeneratorFQN, Nil).get // let's bubble up any problems. 
+
+    TraceSettings(detailLevel, sampler, tokenGenerator)
+  }
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala
new file mode 100644
index 00000000..47455ec5
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TraceSubscriptions.scala
@@ -0,0 +1,45 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import akka.actor.{Terminated, ActorRef, Actor}
+
+class TraceSubscriptions extends Actor {
+  import TraceSubscriptions._
+
+  var subscribers: List[ActorRef] = Nil
+
+  def receive = {
+    case Subscribe(newSubscriber) ⇒
+      if (!subscribers.contains(newSubscriber))
+        subscribers = context.watch(newSubscriber) :: subscribers
+
+    case Unsubscribe(leavingSubscriber) ⇒
+      subscribers = subscribers.filterNot(_ == leavingSubscriber)
+
+    case Terminated(terminatedSubscriber) ⇒
+      subscribers = subscribers.filterNot(_ == terminatedSubscriber)
+
+    case trace: TraceInfo ⇒
+      subscribers.foreach(_ ! trace)
+  }
+}
+
+object TraceSubscriptions {
+  case class Subscribe(subscriber: ActorRef)
+  case class Unsubscribe(subscriber: ActorRef)
+}
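`TraceSettings` above resolves the sampler from `kamon.trace.sampling`, and `TraceSubscriptions` fans every reported `TraceInfo` out to its watched subscribers. A small sketch of the sampler resolution; the object name and values are illustrative, and the fallback assumes kamon-core's reference.conf is on the classpath so that keys such as `token-generator` resolve:

```scala
import com.typesafe.config.ConfigFactory
import kamon.trace.TraceSettings

object SamplerFromConfig extends App {
  val config = ConfigFactory.parseString(
    """kamon.trace {
      |  level-of-detail = simple-trace
      |  sampling = random
      |  random-sampler.chance = 10
      |}""".stripMargin).withFallback(ConfigFactory.load())

  val settings = TraceSettings(config)
  // With RandomSampler(10), roughly 10% of calls return true.
  println(settings.sampler.shouldTrace)
}
```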
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala
new file mode 100644
index 00000000..552962eb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TracerModule.scala
@@ -0,0 +1,197 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.util
+
+import akka.actor._
+import akka.event.{Logging, LoggingAdapter}
+import com.typesafe.config.Config
+import kamon.Kamon
+import kamon.metric.MetricsModule
+import kamon.util._
+
+import scala.collection.JavaConverters._
+
+trait TracerModule {
+  def newContext(name: String): TraceContext
+  def newContext(name: String, token: Option[String]): TraceContext
+  def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext
+  def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, state: Status, isLocal: Boolean): TraceContext
+
+  def subscribe(subscriber: ActorRef): Unit
+  def unsubscribe(subscriber: ActorRef): Unit
+}
+
+object Tracer {
+  private[kamon] val _traceContextStorage = new ThreadLocal[TraceContext] {
+    override def initialValue(): TraceContext = EmptyTraceContext
+  }
+
+  def currentContext: TraceContext =
+    _traceContextStorage.get()
+
+  def setCurrentContext(context: TraceContext): Unit =
+    _traceContextStorage.set(context)
+
+  def clearCurrentContext: Unit =
+    _traceContextStorage.remove()
+
+  def withContext[T](context: TraceContext)(code: ⇒ T): T = {
+    val oldContext = _traceContextStorage.get()
+    _traceContextStorage.set(context)
+
+    try code finally _traceContextStorage.set(oldContext)
+  }
+
+  // Java variant.
+  def withContext[T](context: TraceContext, code: Supplier[T]): T =
+    withContext(context)(code.get)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T = {
+    withContext(Kamon.tracer.newContext(traceName, traceToken, tags)) {
+      val codeResult = code
+      if (autoFinish)
+        currentContext.finish()
+
+      codeResult
+    }
+  }
+
+  def withNewContext[T](traceName: String)(code: ⇒ T): T =
+    withNewContext(traceName, None)(code)
+
+  def withNewContext[T](traceName: String, tags: Map[String, String])(code: ⇒ T): T =
+    withNewContext(traceName, None, tags)(code)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String])(code: ⇒ T): T =
+    withNewContext(traceName, traceToken, Map.empty[String, String])(code)
+
+  def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String])(code: ⇒ T): T =
+    withNewContext(traceName, traceToken, tags, autoFinish = false)(code)
+
+  def withNewContext[T](traceName: String, autoFinish: Boolean)(code: ⇒ T): T =
+    withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code)
+
+  def withNewContext[T](traceName: String, tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T =
+    withNewContext(traceName, None, tags, autoFinish)(code)
+
+  // Java variants.
+ def withNewContext[T](traceName: String, traceToken: Option[String], autoFinish: Boolean, code: Supplier[T]): T = + withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish)(code.get) + + def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T = + withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish)(code.get) + + def withNewContext[T](traceName: String, code: Supplier[T]): T = + withNewContext(traceName, None, Map.empty[String, String], autoFinish = false)(code.get) + + def withNewContext[T](traceName: String, tags: util.Map[String, String], code: Supplier[T]): T = + withNewContext(traceName, None, tags.asScala.toMap, autoFinish = false)(code.get) + + def withNewContext[T](traceName: String, traceToken: Option[String], code: Supplier[T]): T = + withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish = false)(code.get) + + def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], code: Supplier[T]): T = + withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish = false)(code.get) + + def withNewContext[T](traceName: String, autoFinish: Boolean, code: Supplier[T]): T = + withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code.get) + + def withNewContext[T](traceName: String, tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T = + withNewContext(traceName, None, tags.asScala.toMap, autoFinish)(code.get) +} + +private[kamon] class TracerModuleImpl(metricsExtension: MetricsModule, config: Config) extends TracerModule { + @volatile private var _settings = TraceSettings(config) + + private val _subscriptions = new LazyActorRef + private val _incubator = new LazyActorRef + + private def newToken: String = _settings.tokenGenerator() + + def newContext(name: String): TraceContext = + createTraceContext(name, None) + + def newContext(name: String, token: Option[String]): TraceContext = + createTraceContext(name, token) + + def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext = + createTraceContext(name, token, tags) + + def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, status: Status, isLocal: Boolean): TraceContext = + createTraceContext(name, token, tags, timestamp, status, isLocal) + + private def createTraceContext(traceName: String, token: Option[String], tags: Map[String, String] = Map.empty, startTimestamp: RelativeNanoTimestamp = RelativeNanoTimestamp.now, + status: Status = Status.Open, isLocal: Boolean = true): TraceContext = { + + def newMetricsOnlyContext(token: String): TraceContext = + new MetricsOnlyContext(traceName, token, tags, status, _settings.levelOfDetail, startTimestamp, _logger) + + val traceToken = token.getOrElse(newToken) + + _settings.levelOfDetail match { + case LevelOfDetail.MetricsOnly ⇒ + newMetricsOnlyContext(traceToken) + case _ if !isLocal || !_settings.sampler.shouldTrace ⇒ + newMetricsOnlyContext(traceToken) + case _ ⇒ + new TracingContext(traceName, traceToken, tags, currentStatus = Status.Open, _settings.levelOfDetail, isLocal, startTimestamp, _logger, dispatchTracingContext) + } + } + + def subscribe(subscriber: ActorRef): Unit = + _subscriptions.tell(TraceSubscriptions.Subscribe(subscriber)) + + def unsubscribe(subscriber: ActorRef): Unit = + _subscriptions.tell(TraceSubscriptions.Unsubscribe(subscriber)) + + 
private[kamon] def dispatchTracingContext(trace: TracingContext): Unit =
+    if (_settings.sampler.shouldReport(trace.elapsedTime))
+      if (trace.shouldIncubate)
+        _incubator.tell(trace)
+      else
+        _subscriptions.tell(trace.generateTraceInfo)
+
+  /**
+   *  Tracer Extension initialization.
+   */
+  private var _system: ActorSystem = null
+  private var _logger: LoggingAdapter = null
+  private lazy val _start = {
+    val subscriptions = _system.actorOf(Props[TraceSubscriptions], "trace-subscriptions")
+    _subscriptions.point(subscriptions)
+    _incubator.point(_system.actorOf(Incubator.props(subscriptions)))
+  }
+
+  def start(system: ActorSystem, newConfig: Config): Unit = synchronized {
+    _settings = TraceSettings(newConfig)
+    _system = system
+    _logger = Logging(_system, "TracerModule")
+    _start
+    _system = null
+  }
+}
+
+private[kamon] object TracerModuleImpl {
+
+  def apply(metricsExtension: MetricsModule, config: Config) =
+    new TracerModuleImpl(metricsExtension, config)
+}
+
+case class TraceInfo(name: String, token: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], segments: List[SegmentInfo], status: Status)
+case class SegmentInfo(name: String, category: String, library: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], status: Status)
\ No newline at end of file
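Subscribers are plain actors: once a trace finishes and passes `shouldReport`, its `TraceInfo` is delivered to every subscribed `ActorRef`. A minimal sketch, assuming Kamon has been started with a level of detail and sampler that actually report traces; `TraceInfoPrinter` is an illustrative name:

```scala
import akka.actor.{Actor, ActorSystem, Props}
import kamon.Kamon
import kamon.trace.{TraceInfo, Tracer}

class TraceInfoPrinter extends Actor {
  def receive = {
    case trace: TraceInfo ⇒
      println(s"trace [${trace.name}] with token [${trace.token}] took ${trace.elapsedTime.nanos} ns")
  }
}

object TracerUsage extends App {
  Kamon.start()
  val system = ActorSystem("tracer-usage")
  Kamon.tracer.subscribe(system.actorOf(Props[TraceInfoPrinter], "trace-printer"))

  Tracer.withNewContext("find-user", autoFinish = true) {
    // traced work goes here
  }
}
```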
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala b/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala
new file mode 100644
index 00000000..ff128f85
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/TracingContext.scala
@@ -0,0 +1,113 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import java.util.concurrent.ConcurrentLinkedQueue
+import java.util.concurrent.atomic.AtomicInteger
+
+import akka.event.LoggingAdapter
+import kamon.util.{NanoInterval, NanoTimestamp, RelativeNanoTimestamp}
+
+import scala.collection.concurrent.TrieMap
+
+private[trace] class TracingContext(
+    traceName: String,
+    token: String,
+    traceTags: Map[String, String],
+    currentStatus: Status,
+    levelOfDetail: LevelOfDetail,
+    isLocal: Boolean,
+    startTimestamp: RelativeNanoTimestamp,
+    log: LoggingAdapter,
+    traceInfoSink: TracingContext ⇒ Unit
+) extends MetricsOnlyContext(traceName, token, traceTags, currentStatus, levelOfDetail, startTimestamp, log) {
+
+  private val _openSegments = new AtomicInteger(0)
+  private val _startTimestamp = NanoTimestamp.now
+  private val _allSegments = new ConcurrentLinkedQueue[TracingSegment]()
+  private val _metadata = TrieMap.empty[String, String]
+
+  override def addMetadata(key: String, value: String): Unit = _metadata.put(key, value)
+
+  override def startSegment(segmentName: String, category: String, library: String): Segment = {
+    startSegment(segmentName, category, library, Map.empty[String, String])
+  }
+
+  override def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = {
+    _openSegments.incrementAndGet()
+    val newSegment = new TracingSegment(segmentName, category, library, tags)
+    _allSegments.add(newSegment)
+    newSegment
+  }
+
+  override def finish(): Unit = {
+    super.finish()
+    traceInfoSink(this)
+  }
+
+  override def finishWithError(cause: Throwable): Unit = {
+    super.finishWithError(cause)
+    traceInfoSink(this)
+  }
+
+  override def finishSegment(segmentName: String, category: String, library: String, duration: NanoInterval, tags: Map[String, String], isFinishedWithError: Boolean = false): Unit = {
+    _openSegments.decrementAndGet()
+    super.finishSegment(segmentName, category, library, duration, tags, isFinishedWithError)
+  }
+
+  def shouldIncubate: Boolean = (Status.Open == status) || _openSegments.get() > 0
+
+  // Handle with care, should only be used after a trace is finished.
+  def generateTraceInfo: TraceInfo = {
+    require(isClosed, "Can't generate a TraceInfo if the Trace has not been closed yet.")
+
+    val currentSegments = _allSegments.iterator()
+    val segmentsInfo = List.newBuilder[SegmentInfo]
+
+    while (currentSegments.hasNext) {
+      val segment = currentSegments.next()
+      if (segment.isClosed)
+        segmentsInfo += segment.createSegmentInfo(_startTimestamp, startTimestamp)
+      else
+        log.warning("Segment [{}] will be left out of TraceInfo because it was still open.", segment.name)
+    }
+
+    TraceInfo(name, token, _startTimestamp, elapsedTime, _metadata.toMap, tags, segmentsInfo.result(), status)
+  }
+
+  class TracingSegment(
+    segmentName: String,
+    category: String,
+    library: String,
+    segmentTags: Map[String, String]
+  ) extends MetricsOnlySegment(segmentName, category, library, segmentTags) {
+
+    private val metadata = TrieMap.empty[String, String]
+    override def addMetadata(key: String, value: String): Unit = metadata.put(key, value)
+
+    // Handle with care, should only be used after the segment has finished.
+    def createSegmentInfo(traceStartTimestamp: NanoTimestamp, traceRelativeTimestamp: RelativeNanoTimestamp): SegmentInfo = {
+      require(isClosed, "Can't generate a SegmentInfo if the Segment has not been closed yet.")
+
+      // We don't have an epoch-based timestamp for the segments because calling System.currentTimeMillis() is both
+      // expensive and inaccurate, but we can do that once for the trace and calculate all the segments relative to it.
+      val segmentStartTimestamp = new NanoTimestamp((this.startTimestamp.nanos - traceRelativeTimestamp.nanos) + traceStartTimestamp.nanos)
+
+      SegmentInfo(this.name, category, library, segmentStartTimestamp, this.elapsedTime, metadata.toMap, tags, status)
+    }
+  }
+}
\ No newline at end of file
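The arithmetic in `createSegmentInfo` anchors every segment to the single epoch timestamp captured when the trace started, so `System.currentTimeMillis()` is called once per trace rather than once per segment. A worked example with invented values:

```scala
object SegmentTimestampMath extends App {
  // All values in nanoseconds; the numbers are invented for illustration.
  val traceStartEpoch = 1490000000000000000L // NanoTimestamp captured when the trace started
  val traceStartRel   = 1000L                // trace start, System.nanoTime()-based
  val segmentStartRel = 1250L                // segment start, System.nanoTime()-based

  // The segment started 250ns into the trace, so its epoch-based timestamp is:
  val segmentStartEpoch = (segmentStartRel - traceStartRel) + traceStartEpoch
  println(segmentStartEpoch) // 1490000000000000250
}
```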
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala b/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala
new file mode 100644
index 00000000..8177ed14
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala
@@ -0,0 +1,26 @@
+/* ===================================================
+ * Copyright © 2013 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================================================== */
+package kamon.trace.logging
+
+import ch.qos.logback.classic.pattern.ClassicConverter
+import ch.qos.logback.classic.spi.ILoggingEvent
+import kamon.trace.Tracer
+
+class LogbackTraceTokenConverter extends ClassicConverter {
+
+  def convert(event: ILoggingEvent): String =
+    Tracer.currentContext.collect(_.token).getOrElse("undefined")
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala b/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala
new file mode 100644
index 00000000..556366b0
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/trace/logging/MdcKeysSupport.scala
@@ -0,0 +1,54 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace.logging
+
+import kamon.trace.TraceLocal.AvailableToMdc
+import kamon.trace.{EmptyTraceContext, MetricsOnlyContext, TraceContext, Tracer}
+import kamon.util.Supplier
+import org.slf4j.MDC
+
+trait MdcKeysSupport {
+
+  val traceTokenKey = "traceToken"
+  val traceNameKey = "traceName"
+
+  private val defaultKeys = Seq(traceTokenKey, traceNameKey)
+
+  def withMdc[A](thunk: ⇒ A): A = {
+    val keys = copyToMdc(Tracer.currentContext)
+    try thunk finally keys.foreach(key ⇒ MDC.remove(key))
+  }
+
+  // Java variant.
+  def withMdc[A](thunk: Supplier[A]): A = withMdc(thunk.get)
+
+  private[kamon] def copyToMdc(traceContext: TraceContext): Iterable[String] = traceContext match {
+    case ctx: MetricsOnlyContext ⇒
+
+      // Add the default key value pairs for the trace token and trace name.
+      MDC.put(traceTokenKey, ctx.token)
+      MDC.put(traceNameKey, ctx.name)
+
+      defaultKeys ++ ctx.traceLocalStorage.underlyingStorage.collect {
+        case (available: AvailableToMdc, value) ⇒ Map(available.mdcKey → String.valueOf(value))
+      }.flatMap { value ⇒ value.map { case (k, v) ⇒ MDC.put(k, v); k } }
+
+    case EmptyTraceContext ⇒ Iterable.empty[String]
+  }
+}
+
+object MdcKeysSupport extends MdcKeysSupport
\ No newline at end of file
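With `LogbackTraceTokenConverter` registered as a conversion rule in logback.xml and `MdcKeysSupport` mixed in, log statements issued inside `withMdc` can render `%X{traceToken}` and `%X{traceName}` through the MDC. A sketch, assuming a started Kamon and an slf4j/logback backend on the classpath:

```scala
import kamon.Kamon
import kamon.trace.Tracer
import kamon.trace.logging.MdcKeysSupport
import org.slf4j.LoggerFactory

object MdcLogging extends App with MdcKeysSupport {
  Kamon.start()
  private val logger = LoggerFactory.getLogger("MdcLogging")

  Tracer.withNewContext("checkout", autoFinish = true) {
    withMdc {
      // Inside this block the MDC carries the trace token and trace name.
      logger.info("processing checkout")
    }
  }
}
```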
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala b/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala
new file mode 100644
index 00000000..cbf530e4
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/ConfigTools.scala
@@ -0,0 +1,48 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import java.util.concurrent.TimeUnit
+
+import com.typesafe.config.Config
+
+import scala.concurrent.duration.FiniteDuration
+
+import kamon.metric.instrument.{Memory, Time}
+
+object ConfigTools {
+  implicit class Syntax(val config: Config) extends AnyVal {
+    // Durations are read in nanoseconds so that no precision is lost when they are
+    // turned into FiniteDurations, regardless of the unit used in the configuration.
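The `Syntax` implicit class above decorates any `Config` with a few Kamon-flavoured accessors. A small self-contained sketch:

```scala
import com.typesafe.config.ConfigFactory
import kamon.util.ConfigTools.Syntax

object ConfigToolsUsage extends App {
  val config = ConfigFactory.parseString(
    """tick-interval = 10 seconds
      |histogram.precision = fine
      |counter.refresh-interval = 1 second""".stripMargin)

  println(config.getFiniteDuration("tick-interval")) // 10000000000 nanoseconds
  println(config.firstLevelKeys)                     // Set(tick-interval, histogram, counter)
}
```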
+
+    def getFiniteDuration(path: String): FiniteDuration =
+      FiniteDuration(config.getDuration(path, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS)
+
+    def firstLevelKeys: Set[String] = {
+      import scala.collection.JavaConverters._
+
+      config.entrySet().asScala.map {
+        case entry ⇒ entry.getKey.takeWhile(_ != '.')
+      }.toSet
+    }
+
+    def time(path: String): Time = Time(config.getString(path))
+
+    def memory(path: String): Memory = Memory(config.getString(path))
+  }
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala b/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala
new file mode 100644
index 00000000..7a94aefc
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/FastDispatch.scala
@@ -0,0 +1,38 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import akka.actor.ActorRef
+
+import scala.concurrent.{ExecutionContext, Future}
+
+/**
+ * Extension for Future[ActorRef]. Try to dispatch a message to a Future[ActorRef] in the same thread if it has already
+ * completed or do the regular scheduling otherwise. Especially useful when using the ModuleSupervisor extension to
+ * create actors.
+ */
+object FastDispatch {
+  implicit class Syntax(val target: Future[ActorRef]) extends AnyVal {
+
+    def fastDispatch(message: Any)(implicit ec: ExecutionContext): Unit =
+      if (target.isCompleted)
+        target.value.get.map(_ ! message)
+      else
+        target.map(_ ! message)
+  }
+
+}
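`fastDispatch` only skips the scheduling hop when the `Future[ActorRef]` is already complete; otherwise it degrades to a regular `map`. A minimal sketch, with `Printer` as an illustrative actor:

```scala
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import kamon.util.FastDispatch.Syntax
import scala.concurrent.Future

object FastDispatchUsage extends App {
  val system = ActorSystem("fast-dispatch")
  import system.dispatcher

  class Printer extends Actor {
    def receive = { case msg ⇒ println(s"got: $msg") }
  }

  val eventualRef: Future[ActorRef] = Future.successful(system.actorOf(Props(new Printer), "printer"))
  eventualRef.fastDispatch("hello") // already completed: delivered without an extra scheduling hop
}
```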
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala b/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala
new file mode 100644
index 00000000..8dab6519
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/FunctionalInterfaces.scala
@@ -0,0 +1,25 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+trait Supplier[T] {
+  def get: T
+}
+
+trait Function[T, R] {
+  def apply(t: T): R
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala b/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala
new file mode 100644
index 00000000..90bece5c
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/JavaTags.scala
@@ -0,0 +1,14 @@
+package kamon.util
+
+object JavaTags {
+
+  /**
+   * Helper method to transform Java maps into Scala maps. Typically this will be used as a static import from
+   * Java code that wants to use tags, as tags are defined as an immutable Map[String, String] and
+   * creating one directly from Java is quite verbose.
+   */
+  def tagsFromMap(tags: java.util.Map[String, String]): Map[String, String] = {
+    import scala.collection.JavaConverters._
+    tags.asScala.toMap
+  }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala b/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala
new file mode 100644
index 00000000..52e044f8
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Latency.scala
@@ -0,0 +1,29 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import kamon.metric.instrument.Histogram
+
+object Latency {
+  def measure[A](histogram: Histogram)(thunk: ⇒ A): A = {
+    val start = RelativeNanoTimestamp.now
+    try thunk finally {
+      val latency = NanoInterval.since(start).nanos
+      histogram.record(latency)
+    }
+  }
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala b/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala
new file mode 100644
index 00000000..a07abea6
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/LazyActorRef.scala
@@ -0,0 +1,69 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.util + +import java.util +import java.util.concurrent.ConcurrentLinkedQueue + +import akka.actor.{Actor, ActorRef} +import org.HdrHistogram.WriterReaderPhaser + +import scala.annotation.tailrec + +/** + * A LazyActorRef accumulates messages sent to an actor that doesn't exist yet. Once the actor is created and + * the LazyActorRef is pointed to it, all the accumulated messages are flushed and any new message sent to the + * LazyActorRef will immediately be sent to the pointed ActorRef. + * + * This is intended to be used during Kamon's initialization where some components need to use ActorRefs to work + * (like subscriptions and the trace incubator) but our internal ActorSystem is not yet ready to create the + * required actors. + */ +class LazyActorRef { + private val _refPhaser = new WriterReaderPhaser + private val _backlog = new ConcurrentLinkedQueue[(Any, ActorRef)]() + @volatile private var _target: Option[ActorRef] = None + + def tell(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { + val criticalEnter = _refPhaser.writerCriticalSectionEnter() + try { + _target.map(_.tell(message, sender)) getOrElse { + _backlog.add((message, sender)) + } + + } finally { _refPhaser.writerCriticalSectionExit(criticalEnter) } + } + + def point(target: ActorRef): Unit = { + @tailrec def drain(q: util.Queue[(Any, ActorRef)]): Unit = if (!q.isEmpty) { + val (msg, sender) = q.poll() + target.tell(msg, sender) + drain(q) + } + + try { + _refPhaser.readerLock() + + if (_target.isEmpty) { + _target = Some(target) + _refPhaser.flipPhase(1000L) + drain(_backlog) + + } else sys.error("A LazyActorRef cannot be pointed more than once.") + } finally { _refPhaser.readerUnlock() } + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala b/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala new file mode 100644 index 00000000..6fc6fb15 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/util/MapMerge.scala @@ -0,0 +1,43 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.util + +object MapMerge { + + /** + * Merge two immutable maps with the same key and value types, using the provided valueMerge function. + */ + implicit class Syntax[K, V](val map: Map[K, V]) extends AnyVal { + def merge(that: Map[K, V], valueMerge: (V, V) ⇒ V): Map[K, V] = { + val merged = Map.newBuilder[K, V] + + map.foreach { + case (key, value) ⇒ + val mergedValue = that.get(key).map(v ⇒ valueMerge(value, v)).getOrElse(value) + merged += key → mergedValue + } + + that.foreach { + case kv @ (key, _) if !map.contains(key) ⇒ merged += kv + case other ⇒ // ignore, already included. 
+      }
+
+      merged.result()
+    }
+  }
+
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala b/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala
new file mode 100644
index 00000000..1397050f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/NeedToScale.scala
@@ -0,0 +1,37 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import com.typesafe.config.Config
+import kamon.metric.instrument.{Memory, Time}
+import kamon.util.ConfigTools._
+
+object NeedToScale {
+  val TimeUnits = "time-units"
+  val MemoryUnits = "memory-units"
+
+  def unapply(config: Config): Option[(Option[Time], Option[Memory])] = {
+    val scaleTimeTo: Option[Time] =
+      if (config.hasPath(TimeUnits)) Some(config.time(TimeUnits)) else None
+
+    val scaleMemoryTo: Option[Memory] =
+      if (config.hasPath(MemoryUnits)) Some(config.memory(MemoryUnits)) else None
+    if (scaleTimeTo.isDefined || scaleMemoryTo.isDefined) Some(scaleTimeTo → scaleMemoryTo)
+    else None
+  }
+}
+
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala b/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala
new file mode 100644
index 00000000..9019eb4c
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/PaddedAtomicLong.scala
@@ -0,0 +1,25 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.util + +import java.util.concurrent.atomic.AtomicLong + +class PaddedAtomicLong(value: Long = 0) extends AtomicLong(value) { + @volatile var p1, p2, p3, p4, p5, p6 = 7L + + protected def sumPaddingToPreventOptimisation() = p1 + p2 + p3 + p4 + p5 + p6 +} \ No newline at end of file diff --git a/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala b/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala new file mode 100644 index 00000000..50e14ace --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/util/PathFilter.scala @@ -0,0 +1,5 @@ +package kamon.util + +trait PathFilter { + def accept(path: String): Boolean +} diff --git a/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala b/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala new file mode 100644 index 00000000..848fca87 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/util/RegexPathFilter.scala @@ -0,0 +1,27 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.util + +case class RegexPathFilter(path: String) extends PathFilter { + private val pathRegex = path.r + override def accept(path: String): Boolean = { + path match { + case pathRegex(_*) ⇒ true + case _ ⇒ false + } + } +} diff --git a/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala b/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala new file mode 100644 index 00000000..5fb0d066 --- /dev/null +++ b/kamon-core/src/legacy-main/scala/kamon/util/SameThreadExecutionContext.scala @@ -0,0 +1,30 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.util + +import kamon.util.logger.LazyLogger +import scala.concurrent.ExecutionContext + +/** + * For small code blocks that don't need to be run on a separate thread. 
+ */
+object SameThreadExecutionContext extends ExecutionContext {
+  val logger = LazyLogger("SameThreadExecutionContext")
+
+  override def execute(runnable: Runnable): Unit = runnable.run
+  override def reportFailure(t: Throwable): Unit = logger.error(t.getMessage, t)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala b/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala
new file mode 100644
index 00000000..f368e54f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Sequencer.scala
@@ -0,0 +1,40 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/**
+ * This class implements an extremely efficient, thread-safe way to generate an
+ * incrementing sequence of Longs with simple Long overflow protection.
+ */
+class Sequencer {
+  private val CloseToOverflow = Long.MaxValue - 1000000000
+  private val sequenceNumber = new PaddedAtomicLong(1L)
+
+  def next(): Long = {
+    val current = sequenceNumber.getAndIncrement
+
+    // reset the sequence once the value gets close to overflowing
+    if (current > CloseToOverflow) {
+      sequenceNumber.set(current - CloseToOverflow) // we need to maintain the order
+    }
+    current
+  }
+}
+
+object Sequencer {
+  def apply(): Sequencer = new Sequencer()
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala b/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala
new file mode 100644
index 00000000..002dc470
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/Timestamp.scala
@@ -0,0 +1,107 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+/**
+ * Epoch time stamp.
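+ *
+ * Seconds since the UNIX epoch; an illustrative sketch of the helpers below:
+ * {{{
+ *   Timestamp(10) < Timestamp(20)                   // true
+ *   Timestamp.earlier(Timestamp(10), Timestamp(20)) // Timestamp(10)
+ * }}}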
+ */
+case class Timestamp(seconds: Long) extends AnyVal {
+  def <(that: Timestamp): Boolean = this.seconds < that.seconds
+  def >(that: Timestamp): Boolean = this.seconds > that.seconds
+  def ==(that: Timestamp): Boolean = this.seconds == that.seconds
+  def >=(that: Timestamp): Boolean = this.seconds >= that.seconds
+  def <=(that: Timestamp): Boolean = this.seconds <= that.seconds
+
+  override def toString: String = String.valueOf(seconds) + ".seconds"
+}
+
+object Timestamp {
+  def now: Timestamp = new Timestamp(System.currentTimeMillis() / 1000)
+  def earlier(l: Timestamp, r: Timestamp): Timestamp = if (l <= r) l else r
+  def later(l: Timestamp, r: Timestamp): Timestamp = if (l >= r) l else r
+}
+
+/**
+ * Epoch time stamp in milliseconds.
+ */
+case class MilliTimestamp(millis: Long) extends AnyVal {
+  override def toString: String = String.valueOf(millis) + ".millis"
+
+  def toTimestamp: Timestamp = new Timestamp(millis / 1000)
+  def toRelativeNanoTimestamp: RelativeNanoTimestamp = {
+    val diff = (System.currentTimeMillis() - millis) * 1000000
+    new RelativeNanoTimestamp(System.nanoTime() - diff)
+  }
+}
+
+object MilliTimestamp {
+  def now: MilliTimestamp = new MilliTimestamp(System.currentTimeMillis())
+}
+
+/**
+ * Epoch time stamp in nanoseconds.
+ *
+ * NOTE: This doesn't have any better precision than MilliTimestamp, it is just a convenient way to get an epoch
+ * timestamp in nanoseconds.
+ */
+case class NanoTimestamp(nanos: Long) extends AnyVal {
+  def -(that: NanoTimestamp) = new NanoTimestamp(nanos - that.nanos)
+  def +(that: NanoTimestamp) = new NanoTimestamp(nanos + that.nanos)
+  override def toString: String = String.valueOf(nanos) + ".nanos"
+}
+
+object NanoTimestamp {
+  def now: NanoTimestamp = new NanoTimestamp(System.currentTimeMillis() * 1000000)
+}
+
+/**
+ * Number of nanoseconds since an arbitrary origin timestamp provided by the JVM via System.nanoTime().
+ */
+case class RelativeNanoTimestamp(nanos: Long) extends AnyVal {
+  def -(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos - that.nanos)
+  def +(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos + that.nanos)
+  override def toString: String = String.valueOf(nanos) + ".nanos"
+
+  def toMilliTimestamp: MilliTimestamp =
+    new MilliTimestamp(System.currentTimeMillis - ((System.nanoTime - nanos) / 1000000))
+}
+
+object RelativeNanoTimestamp {
+  val zero = new RelativeNanoTimestamp(0L)
+
+  def now: RelativeNanoTimestamp = new RelativeNanoTimestamp(System.nanoTime())
+  def relativeTo(milliTimestamp: MilliTimestamp): RelativeNanoTimestamp =
+    new RelativeNanoTimestamp(now.nanos - (MilliTimestamp.now.millis - milliTimestamp.millis) * 1000000)
+}
+
+/**
+ * Number of nanoseconds that passed between two points in time.
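+ *
+ * Typically obtained by measuring around a block of code (illustrative sketch):
+ * {{{
+ *   val start = RelativeNanoTimestamp.now
+ *   // ... some work ...
+ *   val elapsed = NanoInterval.since(start)
+ * }}}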
+ */
+case class NanoInterval(nanos: Long) extends AnyVal {
+  def <(that: NanoInterval): Boolean = this.nanos < that.nanos
+  def >(that: NanoInterval): Boolean = this.nanos > that.nanos
+  def ==(that: NanoInterval): Boolean = this.nanos == that.nanos
+  def >=(that: NanoInterval): Boolean = this.nanos >= that.nanos
+  def <=(that: NanoInterval): Boolean = this.nanos <= that.nanos
+
+  override def toString: String = String.valueOf(nanos) + ".nanos"
+}
+
+object NanoInterval {
+  def default: NanoInterval = new NanoInterval(0L)
+  def since(relative: RelativeNanoTimestamp): NanoInterval = new NanoInterval(System.nanoTime() - relative.nanos)
+}
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala b/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala
new file mode 100644
index 00000000..d1197a5a
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala
@@ -0,0 +1,45 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+package kamon.util
+
+import scala.collection.concurrent.TrieMap
+
+object TriemapAtomicGetOrElseUpdate {
+
+  /**
+   * Workaround for the non-thread-safe [[scala.collection.concurrent.TrieMap#getOrElseUpdate]] method. More details on
+   * why this is necessary can be found at [[https://issues.scala-lang.org/browse/SI-7943]].
+   */
+  implicit class Syntax[K, V](val trieMap: TrieMap[K, V]) extends AnyVal {
+
+    def atomicGetOrElseUpdate(key: K, op: ⇒ V): V =
+      atomicGetOrElseUpdate(key, op, { _: V ⇒ () })
+
+    def atomicGetOrElseUpdate(key: K, op: ⇒ V, cleanup: V ⇒ Unit): V =
+      trieMap.get(key) match {
+        case Some(v) ⇒ v
+        case None ⇒
+          val d = op
+          trieMap.putIfAbsent(key, d).map { oldValue ⇒
+            // If there was an old value then `d` was never added
+            // and thus needs to be cleaned up.
+            cleanup(d)
+            oldValue
+          } getOrElse (d)
+      }
+  }
+}
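+
+// An illustrative sketch of the syntax above: concurrent callers may each evaluate
+// `op`, but every caller receives the single value that won the putIfAbsent race,
+// and losing values are handed to `cleanup`.
+//
+//   import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax
+//   val cache = TrieMap.empty[String, Long]
+//   cache.atomicGetOrElseUpdate("sequence", 1L)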
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala
new file mode 100644
index 00000000..a4bca570
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorMetricRecorder.scala
@@ -0,0 +1,99 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2016 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.executors
+
+import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder}
+import kamon.metric.instrument.{Gauge, MinMaxCounter, DifferentialValueCollector, InstrumentFactory}
+import java.util.concurrent.{ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor}
+import kamon.util.executors.ForkJoinPools.ForkJoinMetrics
+
+import scala.concurrent.forkjoin.ForkJoinPool
+
+object ForkJoinPools extends ForkJoinLowPriority {
+  trait ForkJoinMetrics[T] {
+    def getParallelism(fjp: T): Long
+    def getPoolSize(fjp: T): Long
+    def getActiveThreadCount(fjp: T): Long
+    def getRunningThreadCount(fjp: T): Long
+    def getQueuedTaskCount(fjp: T): Long
+    def getQueuedSubmissionCount(fjp: T): Long
+  }
+
+  implicit object JavaForkJoin extends ForkJoinMetrics[JavaForkJoinPool] {
+    def getParallelism(fjp: JavaForkJoinPool) = fjp.getParallelism
+    def getPoolSize(fjp: JavaForkJoinPool) = fjp.getPoolSize.toLong
+    def getRunningThreadCount(fjp: JavaForkJoinPool) = fjp.getRunningThreadCount.toLong
+    def getActiveThreadCount(fjp: JavaForkJoinPool) = fjp.getActiveThreadCount.toLong
+    def getQueuedTaskCount(fjp: JavaForkJoinPool) = fjp.getQueuedTaskCount
+    def getQueuedSubmissionCount(fjp: JavaForkJoinPool) = fjp.getQueuedSubmissionCount
+  }
+}
+
+trait ForkJoinLowPriority {
+  implicit object ScalaForkJoin extends ForkJoinMetrics[ForkJoinPool] {
+    def getParallelism(fjp: ForkJoinPool) = fjp.getParallelism
+    def getPoolSize(fjp: ForkJoinPool) = fjp.getPoolSize.toLong
+    def getRunningThreadCount(fjp: ForkJoinPool) = fjp.getRunningThreadCount.toLong
+    def getActiveThreadCount(fjp: ForkJoinPool) = fjp.getActiveThreadCount.toLong
+    def getQueuedTaskCount(fjp: ForkJoinPool) = fjp.getQueuedTaskCount
+    def getQueuedSubmissionCount(fjp: ForkJoinPool) = fjp.getQueuedSubmissionCount
+  }
+}
+
+abstract class ForkJoinPoolMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+  def parallelism: MinMaxCounter
+  def poolSize: Gauge
+  def activeThreads: Gauge
+  def runningThreads: Gauge
+  def queuedTaskCount: Gauge
+  def queuedSubmissionCount: Gauge
+}
+
+object ForkJoinPoolMetrics {
+  def factory[T: ForkJoinMetrics](fjp: T, categoryName: String) = new EntityRecorderFactory[ForkJoinPoolMetrics] {
+    val forkJoinMetrics = implicitly[ForkJoinMetrics[T]]
+
+    def category: String = categoryName
+    def createRecorder(instrumentFactory: InstrumentFactory) = new ForkJoinPoolMetrics(instrumentFactory) {
+      val parallelism = minMaxCounter("parallelism")
+      parallelism.increment(forkJoinMetrics.getParallelism(fjp)) // Steady value.
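+      // The readings below are gauges sampled periodically through the ForkJoinMetrics
+      // type class above, so this single recorder covers both the Java and the Scala
+      // fork-join pool implementations.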
+
+      val poolSize = gauge("pool-size", forkJoinMetrics.getPoolSize(fjp))
+      val activeThreads = gauge("active-threads", forkJoinMetrics.getActiveThreadCount(fjp))
+      val runningThreads = gauge("running-threads", forkJoinMetrics.getRunningThreadCount(fjp))
+      val queuedTaskCount = gauge("queued-task-count", forkJoinMetrics.getQueuedTaskCount(fjp))
+      val queuedSubmissionCount = gauge("queued-submission-count", forkJoinMetrics.getQueuedSubmissionCount(fjp))
+    }
+  }
+}
+
+class ThreadPoolExecutorMetrics(tpe: ThreadPoolExecutor, instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+  val corePoolSize = gauge("core-pool-size", tpe.getCorePoolSize.toLong)
+  val maxPoolSize = gauge("max-pool-size", tpe.getMaximumPoolSize.toLong)
+  val poolSize = gauge("pool-size", tpe.getPoolSize.toLong)
+  val activeThreads = gauge("active-threads", tpe.getActiveCount.toLong)
+  val processedTasks = gauge("processed-tasks", DifferentialValueCollector(() ⇒ tpe.getTaskCount))
+}
+
+object ThreadPoolExecutorMetrics {
+  def factory(tpe: ThreadPoolExecutor, cat: String) = new EntityRecorderFactory[ThreadPoolExecutorMetrics] {
+    def category: String = cat
+    def createRecorder(instrumentFactory: InstrumentFactory) = new ThreadPoolExecutorMetrics(tpe, instrumentFactory)
+  }
+}
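+
+// Note on processed-tasks above: DifferentialValueCollector reports the change in
+// getTaskCount between consecutive collections, turning a monotonically increasing
+// total into a per-interval value.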
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala
new file mode 100644
index 00000000..60612beb
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/executors/ExecutorServiceMetrics.scala
@@ -0,0 +1,134 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.executors
+
+import java.util.concurrent.{ExecutorService, ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor}
+
+import kamon.Kamon
+import kamon.metric.Entity
+
+import scala.concurrent.forkjoin.ForkJoinPool
+import scala.util.control.NoStackTrace
+
+object ExecutorServiceMetrics {
+  val Category = "executor-service"
+
+  private val DelegatedExecutor = Class.forName("java.util.concurrent.Executors$DelegatedExecutorService")
+  private val FinalizableDelegated = Class.forName("java.util.concurrent.Executors$FinalizableDelegatedExecutorService")
+  private val DelegateScheduled = Class.forName("java.util.concurrent.Executors$DelegatedScheduledExecutorService")
+  private val JavaForkJoinPool = classOf[JavaForkJoinPool]
+  private val ScalaForkJoinPool = classOf[ForkJoinPool]
+
+  private val executorField = {
+    // executorService is private :(
+    val field = DelegatedExecutor.getDeclaredField("e")
+    field.setAccessible(true)
+    field
+  }
+
+  /**
+   *
+   * Registers the [[http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html ThreadPoolExecutor]] for monitoring.
+   *
+   * @param name The name of the [[ThreadPoolExecutor]]
+   * @param threadPool The instance of the [[ThreadPoolExecutor]]
+   * @param tags The tags associated with the [[ThreadPoolExecutor]]
+   */
+  @inline private def registerThreadPool(name: String, threadPool: ThreadPoolExecutor, tags: Map[String, String]): Entity = {
+    val threadPoolEntity = Entity(name, Category, tags + ("executor-type" → "thread-pool-executor"))
+    Kamon.metrics.entity(ThreadPoolExecutorMetrics.factory(threadPool, Category), threadPoolEntity)
+    threadPoolEntity
+  }
+
+  /**
+   *
+   * Registers the [[http://www.scala-lang.org/api/current/index.html#scala.collection.parallel.TaskSupport ForkJoinPool]] for monitoring.
+   *
+   * @param name The name of the [[ForkJoinPool]]
+   * @param forkJoinPool The instance of the [[ForkJoinPool]]
+   * @param tags The tags associated with the [[ForkJoinPool]]
+   */
+  @inline private def registerScalaForkJoin(name: String, forkJoinPool: ForkJoinPool, tags: Map[String, String]): Entity = {
+    val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool"))
+    Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity)
+    forkJoinEntity
+  }
+
+  /**
+   *
+   * Registers the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ForkJoinPool.html JavaForkJoinPool]] for monitoring.
+   *
+   * @param name The name of the [[JavaForkJoinPool]]
+   * @param forkJoinPool The instance of the [[JavaForkJoinPool]]
+   * @param tags The tags associated with the [[JavaForkJoinPool]]
+   */
+  @inline private def registerJavaForkJoin(name: String, forkJoinPool: JavaForkJoinPool, tags: Map[String, String]): Entity = {
+    val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool"))
+    Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity)
+    forkJoinEntity
+  }
+
+  /**
+   *
+   * Registers the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] for monitoring.
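+   *
+   * A registration sketch (hypothetical names; a fixed pool matches the
+   * ThreadPoolExecutor case handled below):
+   * {{{
+   *   val pool = java.util.concurrent.Executors.newFixedThreadPool(4)
+   *   val entity = ExecutorServiceMetrics.register("worker-pool", pool, Map("app" → "sample"))
+   *   // ... later ...
+   *   ExecutorServiceMetrics.remove(entity)
+   * }}}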
+   *
+   * @param name The name of the [[ExecutorService]]
+   * @param executorService The instance of the [[ExecutorService]]
+   * @param tags The tags associated with the [[ExecutorService]]
+   */
+  def register(name: String, executorService: ExecutorService, tags: Map[String, String]): Entity = executorService match {
+    case threadPoolExecutor: ThreadPoolExecutor ⇒ registerThreadPool(name, threadPoolExecutor, tags)
+    case scalaForkJoinPool: ForkJoinPool if scalaForkJoinPool.getClass.isAssignableFrom(ScalaForkJoinPool) ⇒ registerScalaForkJoin(name, scalaForkJoinPool, tags)
+    case javaForkJoinPool: JavaForkJoinPool if javaForkJoinPool.getClass.isAssignableFrom(JavaForkJoinPool) ⇒ registerJavaForkJoin(name, javaForkJoinPool, tags)
+    case delegatedExecutor: ExecutorService if delegatedExecutor.getClass.isAssignableFrom(DelegatedExecutor) ⇒ registerDelegatedExecutor(name, delegatedExecutor, tags)
+    case delegatedScheduledExecutor: ExecutorService if delegatedScheduledExecutor.getClass.isAssignableFrom(DelegateScheduled) ⇒ registerDelegatedExecutor(name, delegatedScheduledExecutor, tags)
+    case finalizableDelegatedExecutor: ExecutorService if finalizableDelegatedExecutor.getClass.isAssignableFrom(FinalizableDelegated) ⇒ registerDelegatedExecutor(name, finalizableDelegatedExecutor, tags)
+    case _ ⇒ throw NotSupportedException(s"The ExecutorService $name is not supported.")
+  }
+
+  // Java variant
+  def register(name: String, executorService: ExecutorService, tags: java.util.Map[String, String]): Entity = {
+    import scala.collection.JavaConverters._
+    register(name, executorService, tags.asScala.toMap)
+  }
+
+  /**
+   *
+   * Registers the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] for monitoring.
+   *
+   * @param name The name of the [[ExecutorService]]
+   * @param executorService The instance of the [[ExecutorService]]
+   */
+  def register(name: String, executorService: ExecutorService): Entity = {
+    register(name, executorService, Map.empty[String, String])
+  }
+
+  def remove(entity: Entity): Unit = Kamon.metrics.removeEntity(entity)
+
+  /**
+   * INTERNAL USAGE ONLY
+   */
+  private def registerDelegatedExecutor(name: String, executor: ExecutorService, tags: Map[String, String]) = {
+    val underlyingExecutor = executorField.get(executor) match {
+      case executorService: ExecutorService ⇒ executorService
+      case other ⇒ other
+    }
+    register(name, underlyingExecutor.asInstanceOf[ExecutorService], tags)
+  }
+
+  case class NotSupportedException(message: String) extends RuntimeException with NoStackTrace
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala b/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala
new file mode 100644
index 00000000..929fef4f
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/http/HttpServerMetrics.scala
@@ -0,0 +1,41 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.http
+
+import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder}
+import kamon.metric.instrument.InstrumentFactory
+
+/**
+ * Counts HTTP response status codes into per-status-code counters and per-"trace name plus status" counters. When
+ * recording an HTTP response with status 500 for the trace "GetUser", both the counter named "500" and the counter
+ * named "GetUser_500" will be incremented.
+ */
+class HttpServerMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
+
+  def recordResponse(statusCode: String): Unit =
+    counter(statusCode).increment()
+
+  def recordResponse(traceName: String, statusCode: String): Unit = {
+    recordResponse(statusCode)
+    counter(traceName + "_" + statusCode).increment()
+  }
+}
+
+object HttpServerMetrics extends EntityRecorderFactory[HttpServerMetrics] {
+  def category: String = "http-server"
+  def createRecorder(instrumentFactory: InstrumentFactory): HttpServerMetrics = new HttpServerMetrics(instrumentFactory)
+}
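+
+// An illustrative sketch, relying on the entity API used elsewhere in this codebase:
+//
+//   val recorder = Kamon.metrics.entity(HttpServerMetrics, "http-server")
+//   recorder.recordResponse("GetUser", "500") // increments both "500" and "GetUser_500"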
diff --git a/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala b/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala
new file mode 100644
index 00000000..11be7bbe
--- /dev/null
+++ b/kamon-core/src/legacy-main/scala/kamon/util/logger/LazyLogger.scala
@@ -0,0 +1,49 @@
+/* =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.logger
+
+import org.slf4j.{Logger ⇒ SLF4JLogger}
+
+class LazyLogger(val logger: SLF4JLogger) {
+
+  @inline final def isTraceEnabled = logger.isTraceEnabled
+  @inline final def trace(msg: ⇒ String): Unit = if (isTraceEnabled) logger.trace(msg)
+  @inline final def trace(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isTraceEnabled) logger.trace(msg, t)
+
+  @inline final def isDebugEnabled = logger.isDebugEnabled
+  @inline final def debug(msg: ⇒ String): Unit = if (isDebugEnabled) logger.debug(msg)
+  @inline final def debug(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isDebugEnabled) logger.debug(msg, t)
+
+  @inline final def isErrorEnabled = logger.isErrorEnabled
+  @inline final def error(msg: ⇒ String): Unit = if (isErrorEnabled) logger.error(msg)
+  @inline final def error(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isErrorEnabled) logger.error(msg, t)
+
+  @inline final def isInfoEnabled = logger.isInfoEnabled
+  @inline final def info(msg: ⇒ String): Unit = if (isInfoEnabled) logger.info(msg)
+  @inline final def info(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isInfoEnabled) logger.info(msg, t)
+
+  @inline final def isWarnEnabled = logger.isWarnEnabled
+  @inline final def warn(msg: ⇒ String): Unit = if (isWarnEnabled) logger.warn(msg)
+  @inline final def warn(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isWarnEnabled) logger.warn(msg, t)
+}
+
+object LazyLogger {
+  import scala.reflect.{classTag, ClassTag}
+
+  def apply(name: String): LazyLogger = new LazyLogger(org.slf4j.LoggerFactory.getLogger(name))
+  def apply(cls: Class[_]): LazyLogger = apply(cls.getName)
+  def apply[C: ClassTag](): LazyLogger = apply(classTag[C].runtimeClass.getName)
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-test/resources/application.conf b/kamon-core/src/legacy-test/resources/application.conf
new file mode 100644
index 00000000..bf6123d9
--- /dev/null
+++ b/kamon-core/src/legacy-test/resources/application.conf
@@ -0,0 +1,26 @@
+akka {
+  loggers = ["akka.event.slf4j.Slf4jLogger"]
+}
+
+kamon {
+  metric {
+    tick-interval = 1 hour
+    default-collection-context-buffer-size = 100
+  }
+
+  trace {
+    level-of-detail = simple-trace
+    sampling = all
+  }
+
+  default-tags = {
+    name = "jason"
+    number = 42
+    username = ${USER}
+    list = [1, 2, 3] // lists do not make sense for a tag
+    object = {
+      nested-bool = true
+      nested-string = "a string"
+    }
+  }
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-test/resources/logback.xml b/kamon-core/src/legacy-test/resources/logback.xml
new file mode 100644
index 00000000..b467456f
--- /dev/null
+++ b/kamon-core/src/legacy-test/resources/logback.xml
@@ -0,0 +1,12 @@
+<configuration>
+
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="OFF">
+        <appender-ref ref="STDOUT"/>
+    </root>
+</configuration>
\ No newline at end of file
diff --git a/kamon-core/src/legacy-test/scala/kamon/KamonLifecycleSpec.scala b/kamon-core/src/legacy-test/scala/kamon/KamonLifecycleSpec.scala
new file mode 100644
index 00000000..2fbd1b71
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/KamonLifecycleSpec.scala
@@ -0,0 +1,93 @@
+package kamon
+
+import akka.actor.ActorSystem
+import akka.testkit.{TestKitBase, TestProbe}
+import com.typesafe.config.ConfigFactory
+import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
+import kamon.metric.{EntitySnapshot, SubscriptionsDispatcher}
+import kamon.util.LazyActorRef
+import org.scalatest.{Matchers, WordSpecLike}
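+// TimesOnInt provides the `n times { ... }` repetition syntax used in the spec below.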
+import org.scalactic.TimesOnInt._ + +import scala.concurrent.duration._ + +class KamonLifecycleSpec extends TestKitBase with WordSpecLike with Matchers { + override implicit lazy val system: ActorSystem = ActorSystem("kamon-lifecycle-spec") + + "The Kamon lifecycle" should { + "allow Kamon to be used before it gets started" in { + val someMetric = Kamon.metrics.histogram("allow-me-before-start") + } + + "allow Kamon to be started/shutdown several times" in { + 10 times { + Kamon.shutdown() + Kamon.start() + Kamon.start() + Kamon.shutdown() + Kamon.shutdown() + } + } + + "not dispatch subscriptions before Kamon startup" in { + val subscriber = TestProbe() + Kamon.metrics.histogram("only-after-startup").record(100) + Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) + + flushSubscriptions() + subscriber.expectNoMsg(300 millis) + + Kamon.metrics.histogram("only-after-startup").record(100) + Kamon.start() + flushSubscriptions() + subscriber.expectMsgType[TickMetricSnapshot] + Kamon.shutdown() + } + + "not dispatch subscriptions after Kamon shutdown" in { + val subscriber = TestProbe() + Kamon.start() + Kamon.metrics.histogram("only-before-shutdown").record(100) + Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) + + flushSubscriptions() + subscriber.expectMsgType[TickMetricSnapshot] + + Kamon.metrics.histogram("only-before-shutdown").record(100) + Kamon.shutdown() + Thread.sleep(500) + flushSubscriptions() + subscriber.expectNoMsg(300 millis) + } + + "reconfigure filters after being started" in { + val customConfig = ConfigFactory.parseString( + """ + |kamon.metric.filters.histogram { + | includes = [ "**" ] + | excludes = ["untracked-histogram"] + |} + """.stripMargin + ) + + Kamon.metrics.shouldTrack("untracked-histogram", "histogram") shouldBe true + Kamon.start(customConfig.withFallback(ConfigFactory.load())) + Kamon.metrics.shouldTrack("untracked-histogram", "histogram") shouldBe false + + } + } + + def takeSnapshotOf(name: String, category: String): EntitySnapshot = { + val collectionContext = Kamon.metrics.buildDefaultCollectionContext + val recorder = Kamon.metrics.find(name, category).get + recorder.collect(collectionContext) + } + + def flushSubscriptions(): Unit = { + val subscriptionsField = Kamon.metrics.getClass.getDeclaredField("_subscriptions") + subscriptionsField.setAccessible(true) + val subscriptions = subscriptionsField.get(Kamon.metrics).asInstanceOf[LazyActorRef] + + subscriptions.tell(SubscriptionsDispatcher.Tick) + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/MetricScaleDecoratorSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/MetricScaleDecoratorSpec.scala new file mode 100644 index 00000000..d3d0f4fb --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/MetricScaleDecoratorSpec.scala @@ -0,0 +1,102 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import kamon.Kamon +import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot +import kamon.metric.instrument.{InstrumentFactory, Memory, Time, UnitOfMeasurement} +import kamon.testkit.BaseKamonSpec +import kamon.util.MilliTimestamp +import org.scalatest.OptionValues._ + +class MetricScaleDecoratorSpec extends BaseKamonSpec("metrics-scale-decorator-spec") with SnapshotFixtures { + "the MetricScaleDecorator" when { + "receives a snapshot" which { + + val scaleDecorator = system.actorOf(MetricScaleDecorator.props( + Some(Time.Milliseconds), Some(Memory.KiloBytes), testActor + )) + "is empty" should { + "do nothing for empty snapshots" in { + scaleDecorator ! emptySnapshot + expectMsg(emptySnapshot) + } + } + "is non empty" should { + scaleDecorator ! nonEmptySnapshot + val scaled = expectMsgType[TickMetricSnapshot] + val snapshot = scaled.metrics(testEntity) + + "scale time metrics" in { + snapshot.histogram("nano-time").value.max should be(10L +- 1L) + snapshot.counter("micro-time").value.count should be(1000L) + } + "scale memory metrics" in { + snapshot.histogram("byte-memory").value.max should be(1) + snapshot.counter("kbyte-memory").value.count should be(100L) + } + "do nothing with unknown metrics" in { + snapshot.histogram("unknown-histogram").value.max should be(1000L) + snapshot.counter("unknown-counter").value.count should be(10L) + } + "not change from and to" in { + scaled.from.millis should be(1000) + scaled.to.millis should be(2000) + } + } + } + } +} + +trait SnapshotFixtures { + self: BaseKamonSpec ⇒ + + class ScaleDecoratorTestMetrics(instrumentFactory: InstrumentFactory) + extends GenericEntityRecorder(instrumentFactory) { + val nanoTime = histogram("nano-time", Time.Nanoseconds) + val microTime = counter("micro-time", Time.Microseconds) + val byteMemory = histogram("byte-memory", Memory.Bytes) + val kbyteMemory = counter("kbyte-memory", Memory.KiloBytes) + val unknownHistogram = histogram("unknown-histogram", UnitOfMeasurement.Unknown) + val unknownCounter = counter("unknown-counter", UnitOfMeasurement.Unknown) + } + + object ScaleDecoratorTestMetrics extends EntityRecorderFactory[ScaleDecoratorTestMetrics] { + override def category: String = "decorator-spec" + + override def createRecorder(instrumentFactory: InstrumentFactory): ScaleDecoratorTestMetrics = + new ScaleDecoratorTestMetrics(instrumentFactory) + } + + val testEntity = Entity("metrics-scale-decorator-spec", "decorator-spec") + val recorder = Kamon.metrics.entity(ScaleDecoratorTestMetrics, "metrics-scale-decorator-spec") + + val emptySnapshot = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map.empty) + + recorder.unknownCounter.increment(10) + recorder.unknownHistogram.record(1000L) + recorder.nanoTime.record(10000000L) + recorder.microTime.increment(1000000L) + recorder.byteMemory.record(1024L) + recorder.kbyteMemory.increment(100L) + + val nonEmptySnapshot = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( + (testEntity → recorder.collect(collectionContext)) + )) + +} + diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/SimpleMetricsSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/SimpleMetricsSpec.scala new file mode 100644 index 00000000..6d753888 --- /dev/null +++ 
b/kamon-core/src/legacy-test/scala/kamon/metric/SimpleMetricsSpec.scala
@@ -0,0 +1,106 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import kamon.Kamon
+import kamon.metric.instrument.Histogram.DynamicRange
+import kamon.testkit.BaseKamonSpec
+import scala.concurrent.duration._
+
+class SimpleMetricsSpec extends BaseKamonSpec("simple-metrics-spec") {
+
+  "the SimpleMetrics extension" should {
+
+    "allow registering a fully configured Histogram and get the same Histogram if registering again" in {
+      val histogramA = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2))
+      val histogramB = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2))
+
+      histogramA shouldBe theSameInstanceAs(histogramB)
+    }
+
+    "return the original Histogram when registering a fully configured Histogram for the second time but with different settings" in {
+      val histogramA = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2))
+      val histogramB = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 50000, 2))
+
+      histogramA shouldBe theSameInstanceAs(histogramB)
+    }
+
+    "allow registering a Histogram that takes the default configuration from the kamon.metrics.precision settings" in {
+      Kamon.metrics.histogram("histogram-with-default-configuration")
+    }
+
+    "allow registering a Counter and get the same Counter if registering again" in {
+      val counterA = Kamon.metrics.counter("counter")
+      val counterB = Kamon.metrics.counter("counter")
+
+      counterA shouldBe theSameInstanceAs(counterB)
+    }
+
+    "allow registering a fully configured MinMaxCounter and get the same MinMaxCounter if registering again" in {
+      val minMaxCounterA = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second)
+      val minMaxCounterB = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second)
+
+      minMaxCounterA shouldBe theSameInstanceAs(minMaxCounterB)
+    }
+
+    "return the original MinMaxCounter when registering a fully configured MinMaxCounter for the second time but with different settings" in {
+      val minMaxCounterA = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second)
+      val minMaxCounterB = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 50000, 2), 1 second)
+
+      minMaxCounterA shouldBe theSameInstanceAs(minMaxCounterB)
+    }
+
+    "allow registering a MinMaxCounter that takes the default configuration from the kamon.metrics.precision settings" in {
+      Kamon.metrics.minMaxCounter("min-max-counter-with-default-configuration")
+    }
+
+    "allow registering a fully configured Gauge and get the same Gauge if registering again" in {
+      val gaugeA = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L)
+      val gaugeB = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L)
+
+      gaugeA shouldBe theSameInstanceAs(gaugeB)
+    }
+
+    "return the original Gauge when registering a fully configured Gauge for the second time but with different settings" in {
+      val gaugeA = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L)
+      val gaugeB = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 50000, 2), 1 second)(1L)
+
+      gaugeA shouldBe theSameInstanceAs(gaugeB)
+    }
+
+    "allow registering a Gauge that takes the default configuration from the kamon.metrics.precision settings" in {
+      Kamon.metrics.gauge("gauge-with-default-configuration")(2L)
+    }
+
+    "allow un-registering user metrics" in {
+      val counter = Kamon.metrics.counter("counter-for-remove")
+      val histogram = Kamon.metrics.histogram("histogram-for-remove")
+      val minMaxCounter = Kamon.metrics.minMaxCounter("min-max-counter-for-remove")
+      val gauge = Kamon.metrics.gauge("gauge-for-remove")(2L)
+
+      Kamon.metrics.removeCounter("counter-for-remove")
+      Kamon.metrics.removeHistogram("histogram-for-remove")
+      Kamon.metrics.removeMinMaxCounter("min-max-counter-for-remove")
+      Kamon.metrics.removeGauge("gauge-for-remove")
+
+      counter should not be (theSameInstanceAs(Kamon.metrics.counter("counter-for-remove")))
+      histogram should not be (theSameInstanceAs(Kamon.metrics.histogram("histogram-for-remove")))
+      minMaxCounter should not be (theSameInstanceAs(Kamon.metrics.minMaxCounter("min-max-counter-for-remove")))
+      gauge should not be (theSameInstanceAs(Kamon.metrics.gauge("gauge-for-remove")(2L)))
+    }
+  }
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/SubscriptionsProtocolSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/SubscriptionsProtocolSpec.scala
new file mode 100644
index 00000000..ee34acc7
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/metric/SubscriptionsProtocolSpec.scala
@@ -0,0 +1,150 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.metric + +import akka.actor._ +import akka.testkit.{TestProbe, ImplicitSender} +import com.typesafe.config.ConfigFactory +import kamon.Kamon +import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot +import kamon.testkit.BaseKamonSpec +import scala.concurrent.duration._ + +class SubscriptionsProtocolSpec extends BaseKamonSpec("subscriptions-protocol-spec") with ImplicitSender { + override lazy val config = + ConfigFactory.parseString( + """ + |kamon.metric { + | tick-interval = 1 hour + |} + """.stripMargin + ) + + lazy val metricsModule = Kamon.metrics + import metricsModule.{entity, subscribe, unsubscribe} + val defaultTags: Map[String, String] = Kamon.metrics.defaultTags + + "the Subscriptions messaging protocol" should { + "allow subscribing for a single tick" in { + val subscriber = TestProbe() + entity(TraceMetrics, "one-shot") + subscribe("trace", "one-shot", subscriber.ref, permanently = false) + + flushSubscriptions() + val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] + + tickSnapshot.metrics.size should be(1) + tickSnapshot.metrics.keys should contain(Entity("one-shot", "trace", defaultTags)) + + flushSubscriptions() + subscriber.expectNoMsg(1 second) + } + + "subscriptions should include default tags" in { + val subscriber = TestProbe() + + Kamon.metrics.histogram("histogram-with-tags").record(1) + Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) + flushSubscriptions() + + val tickSubscription = subscriber.expectMsgType[TickMetricSnapshot] + tickSubscription.metrics.head._1.tags.get("name") shouldBe Some("jason") + tickSubscription.metrics.head._1.tags.get("number") shouldBe Some("42") + tickSubscription.metrics.head._1.tags.get("username").isDefined shouldBe true + tickSubscription.metrics.head._1.tags.get("object.nested-bool") shouldBe Some("true") + tickSubscription.metrics.head._1.tags.get("object.nested-string") shouldBe Some("a string") + tickSubscription.metrics.head._1.tags.get("list") shouldBe None + } + + "allow subscribing permanently to a metric" in { + val subscriber = TestProbe() + entity(TraceMetrics, "permanent") + subscribe("trace", "permanent", subscriber.ref, permanently = true) + + for (repetition ← 1 to 5) { + flushSubscriptions() + val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] + + tickSnapshot.metrics.size should be(1) + tickSnapshot.metrics.keys should contain(Entity("permanent", "trace", defaultTags)) + } + } + + "allow subscribing to metrics matching a glob pattern" in { + val subscriber = TestProbe() + entity(TraceMetrics, "include-one") + entity(TraceMetrics, "exclude-two") + entity(TraceMetrics, "include-three") + subscribe("trace", "include-*", subscriber.ref, permanently = true) + + for (repetition ← 1 to 5) { + flushSubscriptions() + val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] + + tickSnapshot.metrics.size should be(2) + tickSnapshot.metrics.keys should contain(Entity("include-one", "trace", defaultTags)) + tickSnapshot.metrics.keys should contain(Entity("include-three", "trace", defaultTags)) + } + } + + "send a single TickMetricSnapshot to each subscriber, even if subscribed multiple times" in { + val subscriber = TestProbe() + entity(TraceMetrics, "include-one") + entity(TraceMetrics, "exclude-two") + entity(TraceMetrics, "include-three") + subscribe("trace", "include-one", subscriber.ref, permanently = true) + subscribe("trace", "include-three", 
subscriber.ref, permanently = true) + + for (repetition ← 1 to 5) { + flushSubscriptions() + val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] + + tickSnapshot.metrics.size should be(2) + tickSnapshot.metrics.keys should contain(Entity("include-one", "trace", defaultTags)) + tickSnapshot.metrics.keys should contain(Entity("include-three", "trace", defaultTags)) + } + } + + "allow un-subscribing a subscriber" in { + val subscriber = TestProbe() + entity(TraceMetrics, "one-shot") + subscribe("trace", "one-shot", subscriber.ref, permanently = true) + + flushSubscriptions() + val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] + tickSnapshot.metrics.size should be(1) + tickSnapshot.metrics.keys should contain(Entity("one-shot", "trace", defaultTags)) + + unsubscribe(subscriber.ref) + + flushSubscriptions() + subscriber.expectNoMsg(1 second) + } + } + + def subscriptionsActor: ActorRef = { + val listener = TestProbe() + system.actorSelection("/user/kamon/kamon-metrics").tell(Identify(1), listener.ref) + listener.expectMsgType[ActorIdentity].ref.get + } +} + +class ForwarderSubscriber(target: ActorRef) extends Actor { + def receive = { + case anything ⇒ target.forward(anything) + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala new file mode 100644 index 00000000..6172a9e0 --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala @@ -0,0 +1,97 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric + +import com.typesafe.config.ConfigFactory +import kamon.Kamon +import kamon.metric.instrument.Histogram.MutableRecord +import kamon.testkit.BaseKamonSpec +import kamon.util.MilliTimestamp +import akka.testkit.ImplicitSender +import scala.concurrent.duration._ +import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot + +class TickMetricSnapshotBufferSpec extends BaseKamonSpec("trace-metrics-spec") with ImplicitSender { + + "the TickMetricSnapshotBuffer" should { + "merge TickMetricSnapshots received until the flush timeout is reached and fix the from/to fields" in new SnapshotFixtures { + val buffer = system.actorOf(TickMetricSnapshotBuffer.props(3 seconds, testActor)) + + buffer ! firstEmpty + buffer ! secondEmpty + buffer ! thirdEmpty + + within(2 seconds)(expectNoMsg()) + val mergedSnapshot = expectMsgType[TickMetricSnapshot] + + mergedSnapshot.from.millis should equal(1000) + mergedSnapshot.to.millis should equal(4000) + mergedSnapshot.metrics should be('empty) + } + + "merge empty and non-empty snapshots" in new SnapshotFixtures { + val buffer = system.actorOf(TickMetricSnapshotBuffer.props(3 seconds, testActor)) + + buffer ! 
firstNonEmpty + buffer ! secondNonEmpty + buffer ! thirdEmpty + + within(2 seconds)(expectNoMsg()) + val mergedSnapshot = expectMsgType[TickMetricSnapshot] + + mergedSnapshot.from.millis should equal(1000) + mergedSnapshot.to.millis should equal(4000) + mergedSnapshot.metrics should not be ('empty) + + val testMetricSnapshot = mergedSnapshot.metrics(testTraceIdentity).histogram("elapsed-time").get + testMetricSnapshot.min should equal(10) + testMetricSnapshot.max should equal(300) + testMetricSnapshot.numberOfMeasurements should equal(6) + testMetricSnapshot.recordsIterator.toStream should contain allOf ( + MutableRecord(10, 3), + MutableRecord(20, 1), + MutableRecord(30, 1), + MutableRecord(300, 1) + ) + + } + } + + trait SnapshotFixtures { + val collectionContext = Kamon.metrics.buildDefaultCollectionContext + val testTraceIdentity = Entity("buffer-spec-test-trace", "trace") + val traceRecorder = Kamon.metrics.entity(TraceMetrics, "buffer-spec-test-trace") + + val firstEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map.empty) + val secondEmpty = TickMetricSnapshot(new MilliTimestamp(2000), new MilliTimestamp(3000), Map.empty) + val thirdEmpty = TickMetricSnapshot(new MilliTimestamp(3000), new MilliTimestamp(4000), Map.empty) + + traceRecorder.elapsedTime.record(10L) + traceRecorder.elapsedTime.record(20L) + traceRecorder.elapsedTime.record(30L) + val firstNonEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( + (testTraceIdentity → traceRecorder.collect(collectionContext)) + )) + + traceRecorder.elapsedTime.record(10L) + traceRecorder.elapsedTime.record(10L) + traceRecorder.elapsedTime.record(300L) + val secondNonEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( + (testTraceIdentity → traceRecorder.collect(collectionContext)) + )) + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/TraceMetricsSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/TraceMetricsSpec.scala new file mode 100644 index 00000000..4c44dc07 --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/TraceMetricsSpec.scala @@ -0,0 +1,121 @@ +/* + * ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * =========================================================================================
+ */
+
+package kamon.metric
+
+import akka.testkit.ImplicitSender
+import com.typesafe.config.ConfigFactory
+import kamon.testkit.BaseKamonSpec
+import kamon.trace.Tracer
+import kamon.metric.instrument.Histogram
+
+import scala.util.control.NoStackTrace
+
+class TraceMetricsSpec extends BaseKamonSpec("trace-metrics-spec") with ImplicitSender {
+
+  "the TraceMetrics" should {
+    "record the elapsed time between a trace creation and finish" in {
+      for (repetitions ← 1 to 10) {
+        Tracer.withContext(newContext("record-elapsed-time")) {
+          Tracer.currentContext.finish()
+        }
+      }
+
+      val snapshot = takeSnapshotOf("record-elapsed-time", "trace")
+      snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(10)
+    }
+
+    "record the elapsed time for segments that occur inside a given trace" in {
+      Tracer.withContext(newContext("trace-with-segments")) {
+        val segment = Tracer.currentContext.startSegment("test-segment", "test-category", "test-library")
+        segment.finish()
+        Tracer.currentContext.finish()
+      }
+
+      val snapshot = takeSnapshotOf("test-segment", "trace-segment",
+        tags = Map(
+          "trace" → "trace-with-segments",
+          "category" → "test-category",
+          "library" → "test-library"
+        ))
+
+      snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1)
+    }
+
+    "record the elapsed time for segments that finish after their corresponding trace has finished" in {
+      val segment = Tracer.withContext(newContext("closing-segment-after-trace")) {
+        val s = Tracer.currentContext.startSegment("test-segment", "test-category", "test-library")
+        Tracer.currentContext.finish()
+        s
+      }
+
+      val beforeFinishSegmentSnapshot = takeSnapshotOf("closing-segment-after-trace", "trace")
+      beforeFinishSegmentSnapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1)
+
+      intercept[NoSuchElementException] {
+        // The segment metric should not exist before the segment has finished.
+ + takeSnapshotOf("test-segment", "trace-segment", + tags = Map( + "trace" → "closing-segment-after-trace", + "category" → "test-category", + "library" → "test-library" + )) + } + + segment.finish() + + val afterFinishSegmentSnapshot = takeSnapshotOf("test-segment", "trace-segment", + tags = Map( + "trace" → "closing-segment-after-trace", + "category" → "test-category", + "library" → "test-library" + )) + + afterFinishSegmentSnapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) + } + + "record the elapsed time between a trace creation and finish with an error" in { + for (repetitions ← 1 to 10) { + Tracer.withContext(newContext("record-elapsed-time-with-error")) { + Tracer.currentContext.finishWithError(new RuntimeException("awesome-trace-error") with NoStackTrace) + } + } + + val snapshot = takeSnapshotOf("record-elapsed-time-with-error", "trace") + snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(10) + snapshot.counter("errors").get.count should be(10) + } + + "record the elapsed time for segments that finish with an error and that occur inside a given trace" in { + Tracer.withContext(newContext("trace-with-segments")) { + val segment = Tracer.currentContext.startSegment("test-segment-with-error", "test-category", "test-library") + segment.finishWithError(new RuntimeException("awesome-segment-error") with NoStackTrace) + Tracer.currentContext.finish() + } + + val snapshot = takeSnapshotOf("test-segment-with-error", "trace-segment", + tags = Map( + "trace" → "trace-with-segments", + "category" → "test-category", + "library" → "test-library" + )) + + snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) + snapshot.counter("errors").get.count should be(1) + } + } +} \ No newline at end of file diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/instrument/CounterSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/CounterSpec.scala new file mode 100644 index 00000000..d5d651e5 --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/CounterSpec.scala @@ -0,0 +1,79 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.metric.instrument + +import java.nio.LongBuffer + +import org.scalatest.{Matchers, WordSpec} + +class CounterSpec extends WordSpec with Matchers { + + "a Counter" should { + "allow increment only operations" in new CounterFixture { + counter.increment() + counter.increment(10) + + intercept[UnsupportedOperationException] { + counter.increment(-10) + } + } + + "reset to zero when a snapshot is taken" in new CounterFixture { + counter.increment(100) + takeSnapshotFrom(counter).count should be(100) + takeSnapshotFrom(counter).count should be(0) + takeSnapshotFrom(counter).count should be(0) + + counter.increment(50) + takeSnapshotFrom(counter).count should be(50) + takeSnapshotFrom(counter).count should be(0) + } + + "produce a snapshot that can be merged with others" in new CounterFixture { + val counterA = Counter() + val counterB = Counter() + counterA.increment(100) + counterB.increment(200) + + val counterASnapshot = takeSnapshotFrom(counterA) + val counterBSnapshot = takeSnapshotFrom(counterB) + + counterASnapshot.merge(counterBSnapshot, collectionContext).count should be(300) + counterBSnapshot.merge(counterASnapshot, collectionContext).count should be(300) + } + + "produce a snapshot that can be scaled" in new CounterFixture { + counter.increment(100) + + val counterSnapshot = takeSnapshotFrom(counter) + + val scaledSnapshot = counterSnapshot.scale(Time.Milliseconds, Time.Microseconds) + scaledSnapshot.count should be(100000) + } + + } + + trait CounterFixture { + val counter = Counter() + + val collectionContext = new CollectionContext { + val buffer: LongBuffer = LongBuffer.allocate(1) + } + + def takeSnapshotFrom(counter: Counter): Counter.Snapshot = counter.collect(collectionContext) + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/instrument/GaugeSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/GaugeSpec.scala new file mode 100644 index 00000000..ec07d66c --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/GaugeSpec.scala @@ -0,0 +1,79 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * ========================================================================================= + */ + +package kamon.metric.instrument + +import java.util.concurrent.atomic.AtomicLong +import kamon.Kamon +import kamon.metric.instrument.Histogram.DynamicRange +import kamon.testkit.BaseKamonSpec +import scala.concurrent.duration._ + +class GaugeSpec extends BaseKamonSpec("gauge-spec") { + + "a Gauge" should { + "automatically record the current value using the configured refresh-interval" in new GaugeFixture { + val (numberOfValuesRecorded, gauge) = createGauge() + Thread.sleep(1.second.toMillis) + + numberOfValuesRecorded.get() should be(10L +- 1L) + gauge.cleanup + } + + "stop automatically recording after a call to cleanup" in new GaugeFixture { + val (numberOfValuesRecorded, gauge) = createGauge() + Thread.sleep(1.second.toMillis) + + gauge.cleanup + numberOfValuesRecorded.get() should be(10L +- 1L) + Thread.sleep(1.second.toMillis) + + numberOfValuesRecorded.get() should be(10L +- 1L) + } + + "produce a Histogram snapshot including all the recorded values" in new GaugeFixture { + val (numberOfValuesRecorded, gauge) = createGauge() + + Thread.sleep(1.second.toMillis) + gauge.cleanup + val snapshot = gauge.collect(Kamon.metrics.buildDefaultCollectionContext) + + snapshot.numberOfMeasurements should be(10L +- 1L) + snapshot.min should be(1) + snapshot.max should be(10L +- 1L) + } + + "not record the current value when doing a collection" in new GaugeFixture { + val (numberOfValuesRecorded, gauge) = createGauge(10 seconds) + + val snapshot = gauge.collect(Kamon.metrics.buildDefaultCollectionContext) + snapshot.numberOfMeasurements should be(0) + numberOfValuesRecorded.get() should be(0) + } + } + + trait GaugeFixture { + def createGauge(refreshInterval: FiniteDuration = 100 millis): (AtomicLong, Gauge) = { + val recordedValuesCounter = new AtomicLong(0) + val gauge = Gauge(DynamicRange(1, 100, 2), refreshInterval, Kamon.metrics.settings.refreshScheduler, { + () ⇒ recordedValuesCounter.addAndGet(1) + }) + + (recordedValuesCounter, gauge) + } + + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/instrument/HistogramSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/HistogramSpec.scala new file mode 100644 index 00000000..9551c6ea --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/HistogramSpec.scala @@ -0,0 +1,157 @@ +/* + * ========================================================================================= + * Copyright © 2013 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.nio.LongBuffer
+
+import kamon.metric.instrument.Histogram.DynamicRange
+import org.scalatest.{Matchers, WordSpec}
+
+import scala.util.Random
+
+class HistogramSpec extends WordSpec with Matchers {
+
+  "a Histogram" should {
+    "allow recording values within the configured range" in new HistogramFixture {
+      histogram.record(1000)
+      histogram.record(5000, count = 100)
+      histogram.record(10000)
+    }
+
+    "not fail when recording values higher than the highest trackable value" in new HistogramFixture {
+      histogram.record(Long.MaxValue)
+    }
+
+    "reset all recorded levels to zero after a snapshot collection" in new HistogramFixture {
+      histogram.record(100)
+      histogram.record(200)
+      histogram.record(300)
+
+      takeSnapshot().numberOfMeasurements should be(3)
+      takeSnapshot().numberOfMeasurements should be(0)
+    }
+
+    "produce a snapshot" which {
+      "supports min, max, percentile, sum, numberOfMeasurements and recordsIterator operations" in new HistogramFixture {
+        histogram.record(100)
+        histogram.record(200, count = 200)
+        histogram.record(300)
+        histogram.record(900)
+
+        val snapshot = takeSnapshot()
+
+        snapshot.min should equal(100L +- 1L)
+        snapshot.max should equal(900L +- 9L)
+        snapshot.percentile(50.0D) should be(200)
+        snapshot.percentile(99.5D) should be(300)
+        snapshot.percentile(99.9D) should be(900)
+        snapshot.sum should be(41300)
+        snapshot.numberOfMeasurements should be(203)
+
+        val records = snapshot.recordsIterator.map(r ⇒ r.level → r.count).toSeq
+        records.size should be(4)
+        records(0) should be(100 → 1)
+        records(1) should be(200 → 200)
+        records(2) should be(300 → 1)
+        records(3) should be(900 → 1)
+      }
+
+      "can be scaled" in new HistogramFixture {
+        histogram.record(100)
+        histogram.record(200, count = 200)
+        histogram.record(300)
+        histogram.record(900)
+
+        val snapshot = takeSnapshot().scale(Time.Seconds, Time.Milliseconds)
+
+        snapshot.min should equal(100000L +- 1000L)
+        snapshot.max should equal(900000L +- 9000L)
+        snapshot.percentile(50.0D) should be(200000)
+        snapshot.percentile(99.5D) should be(300000)
+        snapshot.percentile(99.9D) should be(900000)
+        snapshot.sum should be(41300000)
+        snapshot.numberOfMeasurements should be(203)
+
+        val records = snapshot.recordsIterator.map(r ⇒ r.level → r.count).toSeq
+        records.size should be(4)
+        records(0) should be(100000 → 1)
+        records(1) should be(200000 → 200)
+        records(2) should be(300000 → 1)
+        records(3) should be(900000 → 1)
+      }
+
+      "can be merged with another snapshot" in new MultipleHistogramFixture {
+        val random = new Random(System.nanoTime())
+
+        for (repetitions ← 1 to 1000) {
+          // Put some values on A and Control
+          for (_ ← 1 to 1000) {
+            val newRecording = random.nextInt(100000)
+            controlHistogram.record(newRecording)
+            histogramA.record(newRecording)
+          }
+
+          // Put some values on B and Control
+          for (_ ← 1 to 2000) {
+            val newRecording = random.nextInt(100000)
+            controlHistogram.record(newRecording)
+            histogramB.record(newRecording)
+          }
+
+          val controlSnapshot = takeSnapshotFrom(controlHistogram)
+          val histogramASnapshot = takeSnapshotFrom(histogramA)
+          val histogramBSnapshot = takeSnapshotFrom(histogramB)
+
+          assertEquals(controlSnapshot, histogramASnapshot.merge(histogramBSnapshot, collectionContext))
+          assertEquals(controlSnapshot, histogramBSnapshot.merge(histogramASnapshot, collectionContext))
+        }
+      }
+    }
+  }
+
+  trait HistogramFixture {
+    val collectionContext = new
CollectionContext { + val buffer: LongBuffer = LongBuffer.allocate(10000) + } + + val histogram = Histogram(DynamicRange(1, 100000, 2)) + + def takeSnapshot(): Histogram.Snapshot = histogram.collect(collectionContext) + } + + trait MultipleHistogramFixture { + val collectionContext = new CollectionContext { + val buffer: LongBuffer = LongBuffer.allocate(10000) + } + + val controlHistogram = Histogram(DynamicRange(1, 100000, 2)) + val histogramA = Histogram(DynamicRange(1, 100000, 2)) + val histogramB = Histogram(DynamicRange(1, 100000, 2)) + + def takeSnapshotFrom(histogram: Histogram): InstrumentSnapshot = histogram.collect(collectionContext) + + def assertEquals(left: InstrumentSnapshot, right: InstrumentSnapshot): Unit = { + val leftSnapshot = left.asInstanceOf[Histogram.Snapshot] + val rightSnapshot = right.asInstanceOf[Histogram.Snapshot] + + leftSnapshot.numberOfMeasurements should equal(rightSnapshot.numberOfMeasurements) + leftSnapshot.min should equal(rightSnapshot.min) + leftSnapshot.max should equal(rightSnapshot.max) + leftSnapshot.recordsIterator.toStream should contain theSameElementsAs (rightSnapshot.recordsIterator.toStream) + } + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala new file mode 100644 index 00000000..d007d4cd --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala @@ -0,0 +1,140 @@ +/* ========================================================================================= + * Copyright © 2013-2014 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * =========================================================================================
+ */
+
+package kamon.metric.instrument
+
+import java.nio.LongBuffer
+
+import akka.actor._
+import akka.testkit.TestProbe
+import kamon.Kamon
+import kamon.metric.instrument.Histogram.{DynamicRange, MutableRecord}
+import kamon.testkit.BaseKamonSpec
+import scala.concurrent.duration._
+
+class MinMaxCounterSpec extends BaseKamonSpec("min-max-counter-spec") {
+
+  "the MinMaxCounter" should {
+    "track ascending tendencies" in new MinMaxCounterFixture {
+      mmCounter.increment()
+      mmCounter.increment(3)
+      mmCounter.increment()
+
+      val snapshot = collectCounterSnapshot()
+
+      snapshot.min should be(0)
+      snapshot.max should be(5)
+      snapshot.recordsIterator.toStream should contain allOf (
+        MutableRecord(0, 1), // min
+        MutableRecord(5, 2)
+      ) // max and current
+    }
+
+    "track descending tendencies" in new MinMaxCounterFixture {
+      mmCounter.increment(5)
+      mmCounter.decrement()
+      mmCounter.decrement(3)
+      mmCounter.decrement()
+
+      val snapshot = collectCounterSnapshot()
+
+      snapshot.min should be(0)
+      snapshot.max should be(5)
+      snapshot.recordsIterator.toStream should contain allOf (
+        MutableRecord(0, 2), // min and current
+        MutableRecord(5, 1)
+      ) // max
+    }
+
+    "reset the min and max to the current value after taking a snapshot" in new MinMaxCounterFixture {
+      mmCounter.increment(5)
+      mmCounter.decrement(3)
+
+      val firstSnapshot = collectCounterSnapshot()
+
+      firstSnapshot.min should be(0)
+      firstSnapshot.max should be(5)
+      firstSnapshot.recordsIterator.toStream should contain allOf (
+        MutableRecord(0, 1), // min
+        MutableRecord(2, 1), // current
+        MutableRecord(5, 1)
+      ) // max
+
+      val secondSnapshot = collectCounterSnapshot()
+
+      secondSnapshot.min should be(2)
+      secondSnapshot.max should be(2)
+      secondSnapshot.recordsIterator.toStream should contain(
+        MutableRecord(2, 3)
+      ) // min, max and current
+    }
+
+    "report zero as the min and current values if the current value fell below zero" in new MinMaxCounterFixture {
+      mmCounter.decrement(3)
+
+      val snapshot = collectCounterSnapshot()
+
+      snapshot.min should be(0)
+      snapshot.max should be(0)
+      snapshot.recordsIterator.toStream should contain(
+        MutableRecord(0, 3)
+      ) // min, max and current (even though current really is -3)
+    }
+
+    "never record values below zero in very busy situations" in new MinMaxCounterFixture {
+      val monitor = TestProbe()
+      val workers = for (workers ← 1 to 50) yield {
+        system.actorOf(Props(new MinMaxCounterUpdateActor(mmCounter, monitor.ref)))
+      }
+
+      workers foreach (_ ! "increment")
+      for (refresh ← 1 to 1000) {
+        collectCounterSnapshot()
+        Thread.sleep(1)
+      }
+
+      monitor.expectNoMsg()
+      workers foreach (_ ! PoisonPill)
+    }
+  }
+
+  trait MinMaxCounterFixture {
+    val collectionContext = new CollectionContext {
+      val buffer: LongBuffer = LongBuffer.allocate(64)
+    }
+
+    val mmCounter = MinMaxCounter(DynamicRange(1, 1000, 2), 1 hour, Kamon.metrics.settings.refreshScheduler)
+    mmCounter.cleanup // cancel the refresh schedule
+
+    def collectCounterSnapshot(): Histogram.Snapshot = mmCounter.collect(collectionContext)
+  }
+}
+
+class MinMaxCounterUpdateActor(mmc: MinMaxCounter, monitor: ActorRef) extends Actor {
+  val x = Array.ofDim[Int](4)
+  def receive = {
+    case "increment" ⇒
+      mmc.increment()
+      self ! "decrement"
+    case "decrement" ⇒
+      mmc.decrement()
+      self ! "increment"
+      try {
+        mmc.refreshValues()
+      } catch {
+        case _: IndexOutOfBoundsException ⇒ monitor !
"failed" + } + } +} \ No newline at end of file diff --git a/kamon-core/src/legacy-test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala new file mode 100644 index 00000000..7133579e --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala @@ -0,0 +1,98 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.metric.instrument + +import kamon.metric.instrument.UnitOfMeasurement.Unknown +import org.scalatest.{Matchers, WordSpec} + +class UnitOfMeasurementSpec extends WordSpec with Matchers { + + "Time unit" should { + "resolve Time Unit by valid name" in { + Time("s") should be(Time.Seconds) + Time("n") should be(Time.Nanoseconds) + Time("ms") should be(Time.Milliseconds) + Time("µs") should be(Time.Microseconds) + } + "fail to resolve Time Unit by invalid name" in { + val ex = intercept[IllegalArgumentException](Time("boo")) + ex.getMessage should be("Can't recognize time unit 'boo'") + } + "scale time properly" in { + val epsilon = 0.0001 + + Time.Nanoseconds.scale(Time.Nanoseconds)(1000000D) should be(1000000D +- epsilon) + Time.Nanoseconds.scale(Time.Microseconds)(1000000D) should be(1000D +- epsilon) + Time.Nanoseconds.scale(Time.Milliseconds)(1000000D) should be(1D +- epsilon) + Time.Nanoseconds.scale(Time.Seconds)(1000000D) should be(0.001D +- epsilon) + Time.Seconds.scale(Time.Nanoseconds)(1D) should be(1000000000D +- epsilon) + } + "allow scale only time" in { + intercept[IllegalArgumentException](Time.Nanoseconds.tryScale(Unknown)(100)) + .getMessage should be("Can't scale different types of units `time` and `unknown`") + intercept[IllegalArgumentException](Time.Nanoseconds.tryScale(Memory.Bytes)(100)) + .getMessage should be("Can't scale different types of units `time` and `bytes`") + val epsilon = 0.0001 + + Time.Nanoseconds.tryScale(Time.Nanoseconds)(100D) should be(100D +- epsilon) + } + } + + "Memory unit" should { + "resolve Memory Unit by valid name" in { + Memory("b") should be(Memory.Bytes) + Memory("Kb") should be(Memory.KiloBytes) + Memory("Mb") should be(Memory.MegaBytes) + Memory("Gb") should be(Memory.GigaBytes) + } + "fail to resolve Memory Unit by invalid name" in { + val ex = intercept[IllegalArgumentException](Memory("boo")) + ex.getMessage should be("Can't recognize memory unit 'boo'") + } + "scale memory properly" in { + val epsilon = 0.0001 + + Memory.Bytes.scale(Memory.Bytes)(1000000D) should be(1000000D +- epsilon) + Memory.Bytes.scale(Memory.KiloBytes)(1000000D) should be(976.5625D +- epsilon) + Memory.Bytes.scale(Memory.MegaBytes)(1000000D) should be(0.9536D +- epsilon) + Memory.Bytes.scale(Memory.GigaBytes)(1000000D) should be(9.3132E-4D +- epsilon) + 
Memory.MegaBytes.scale(Memory.Bytes)(1D) should be(1048576D +- epsilon)
+    }
+    "allow scaling only memory" in {
+      intercept[IllegalArgumentException](Memory.Bytes.tryScale(Unknown)(100))
+        .getMessage should be("Can't scale different types of units `bytes` and `unknown`")
+      intercept[IllegalArgumentException](Memory.Bytes.tryScale(Time.Nanoseconds)(100))
+        .getMessage should be("Can't scale different types of units `bytes` and `time`")
+      val epsilon = 0.0001
+
+      Memory.Bytes.tryScale(Memory.Bytes)(100D) should be(100D +- epsilon)
+    }
+
+  }
+
+  "Unknown unit" should {
+    "allow scaling only Unknown" in {
+      intercept[IllegalArgumentException](Unknown.tryScale(Memory.Bytes)(100))
+        .getMessage should be("Can't scale different types of units `unknown` and `bytes`")
+      intercept[IllegalArgumentException](Unknown.tryScale(Time.Nanoseconds)(100))
+        .getMessage should be("Can't scale different types of units `unknown` and `time`")
+
+      Unknown.scale(Unknown)(100D) should be(100D)
+    }
+
+  }
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/testkit/BaseKamonSpec.scala b/kamon-core/src/legacy-test/scala/kamon/testkit/BaseKamonSpec.scala
new file mode 100644
index 00000000..995f15fa
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/testkit/BaseKamonSpec.scala
@@ -0,0 +1,70 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * ========================================================================================= + */ + +package kamon.testkit + +import akka.actor.ActorSystem +import akka.testkit.{ImplicitSender, TestKitBase} +import com.typesafe.config.Config +import kamon.{ActorSystemTools, Kamon} +import kamon.metric.{Entity, EntitySnapshot, SubscriptionsDispatcher} +import kamon.trace.TraceContext +import kamon.util.LazyActorRef +import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} + +abstract class BaseKamonSpec(actorSystemName: String) extends TestKitBase with WordSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll { + lazy val collectionContext = Kamon.metrics.buildDefaultCollectionContext + override implicit lazy val system: ActorSystem = { + Kamon.start(mergedConfig) + ActorSystem(actorSystemName, mergedConfig) + } + + def config: Config = Kamon.config + + def mergedConfig: Config = config.withFallback(Kamon.config) + + def newContext(name: String): TraceContext = + Kamon.tracer.newContext(name) + + def newContext(name: String, token: String): TraceContext = + Kamon.tracer.newContext(name, Option(token)) + + def newContext(name: String, token: String, tags: Map[String, String]): TraceContext = + Kamon.tracer.newContext(name, Option(token), tags) + + def takeSnapshotOf(name: String, category: String): EntitySnapshot = { + val recorder = Kamon.metrics.find(name, category).get + recorder.collect(collectionContext) + } + + def takeSnapshotOf(name: String, category: String, tags: Map[String, String]): EntitySnapshot = { + val recorder = Kamon.metrics.find(Entity(name, category, tags)).get + recorder.collect(collectionContext) + } + + def flushSubscriptions(): Unit = { + val subscriptionsField = Kamon.metrics.getClass.getDeclaredField("_subscriptions") + subscriptionsField.setAccessible(true) + val subscriptions = subscriptionsField.get(Kamon.metrics).asInstanceOf[LazyActorRef] + + subscriptions.tell(SubscriptionsDispatcher.Tick) + } + + override protected def afterAll(): Unit = { + Kamon.shutdown() + ActorSystemTools.terminateActorSystem(system) + } +} diff --git a/kamon-core/src/legacy-test/scala/kamon/trace/SamplerSpec.scala b/kamon-core/src/legacy-test/scala/kamon/trace/SamplerSpec.scala new file mode 100644 index 00000000..88cdf116 --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/trace/SamplerSpec.scala @@ -0,0 +1,76 @@ +package kamon.trace + +import kamon.testkit.BaseKamonSpec +import kamon.util.NanoInterval + +class SamplerSpec extends BaseKamonSpec("sampler-spec") { + + "the Sampler" should { + "work as intended" when { + "using all mode" in { + val sampler = SampleAll + + sampler.shouldTrace should be(true) + + sampler.shouldReport(NanoInterval.default) should be(true) + } + + "using random mode" in { + val sampler = new RandomSampler(100) + + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + + sampler.shouldReport(NanoInterval.default) should be(true) + } + + "using ordered mode" in { + var sampler = new OrderedSampler(1) + + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(true) + + sampler = new OrderedSampler(2) + + sampler.shouldTrace should be(false) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(false) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(false) + sampler.shouldTrace should be(true) + + 
sampler.shouldReport(NanoInterval.default) should be(true) + } + + "using threshold mode" in { + val sampler = new ThresholdSampler(new NanoInterval(10000000L)) + + sampler.shouldTrace should be(true) + + sampler.shouldReport(new NanoInterval(5000000L)) should be(false) + sampler.shouldReport(new NanoInterval(10000000L)) should be(true) + sampler.shouldReport(new NanoInterval(15000000L)) should be(true) + sampler.shouldReport(new NanoInterval(0L)) should be(false) + } + + "using clock mode" in { + val sampler = new ClockSampler(new NanoInterval(10000000L)) + + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(false) + Thread.sleep(1L) + sampler.shouldTrace should be(false) + Thread.sleep(10L) + sampler.shouldTrace should be(true) + sampler.shouldTrace should be(false) + + sampler.shouldReport(NanoInterval.default) should be(true) + } + } + } + +} diff --git a/kamon-core/src/legacy-test/scala/kamon/trace/SimpleTraceSpec.scala b/kamon-core/src/legacy-test/scala/kamon/trace/SimpleTraceSpec.scala new file mode 100644 index 00000000..fe7ad053 --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/trace/SimpleTraceSpec.scala @@ -0,0 +1,167 @@ +/* + * ========================================================================================= + * Copyright © 2013-2016 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import kamon.Kamon
+import kamon.testkit.BaseKamonSpec
+
+import scala.concurrent.duration._
+import scala.util.control.NoStackTrace
+
+class SimpleTraceSpec extends BaseKamonSpec("simple-trace-spec") {
+
+  "the simple tracing" should {
+    "send a TraceInfo when the trace has finished and all segments are finished" in {
+      Kamon.tracer.subscribe(testActor)
+
+      Tracer.withContext(newContext("simple-trace-without-segments")) {
+        Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish()
+        Tracer.currentContext.startSegment("segment-two", "test-segment", "test").finish()
+        Tracer.currentContext.finish()
+      }
+
+      val traceInfo = expectMsgType[TraceInfo]
+      Kamon.tracer.unsubscribe(testActor)
+
+      traceInfo.name should be("simple-trace-without-segments")
+      traceInfo.segments.size should be(2)
+      traceInfo.segments.find(_.name == "segment-one") should be('defined)
+      traceInfo.segments.find(_.name == "segment-two") should be('defined)
+    }
+
+    "send a TraceInfo when the trace has finished with error and all segments are finished" in {
+      Kamon.tracer.subscribe(testActor)
+
+      Tracer.withContext(newContext("simple-trace-with-error-and-without-segments")) {
+        Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish()
+        Tracer.currentContext.startSegment("segment-two", "test-segment", "test").finish()
+        Tracer.currentContext.finishWithError(TraceException("awesome-trace-error"))
+      }
+
+      val traceInfo = expectMsgType[TraceInfo]
+      Kamon.tracer.unsubscribe(testActor)
+
+      traceInfo.name should be("simple-trace-with-error-and-without-segments")
+      traceInfo.status should be(Status.FinishedWithError)
+      traceInfo.segments.size should be(2)
+      traceInfo.segments.find(_.name == "segment-one") should be('defined)
+      traceInfo.segments.find(_.name == "segment-two") should be('defined)
+    }
+
+    "send a TraceInfo when the trace has finished with error and all segments are finished with error" in {
+      Kamon.tracer.subscribe(testActor)
+
+      Tracer.withContext(newContext("simple-trace-with-error-and-without-segments")) {
+        Tracer.currentContext.startSegment("segment-one-finished-with-error", "test-segment", "test").finishWithError(SegmentException("awesome-segment-exception"))
+        Tracer.currentContext.startSegment("segment-two-finished-with-error", "test-segment", "test").finishWithError(SegmentException("awesome-segment-exception"))
+        Tracer.currentContext.finishWithError(TraceException("awesome-trace-error"))
+      }
+
+      val traceInfo = expectMsgType[TraceInfo]
+      Kamon.tracer.unsubscribe(testActor)
+
+      traceInfo.name should be("simple-trace-with-error-and-without-segments")
+      traceInfo.status should be(Status.FinishedWithError)
+      traceInfo.segments.size should be(2)
+
+      val segmentOne = traceInfo.segments.find(_.name == "segment-one-finished-with-error")
+      val segmentTwo = traceInfo.segments.find(_.name == "segment-two-finished-with-error")
+
+      segmentOne.get.status should be(Status.FinishedWithError)
+      segmentTwo.get.status should be(Status.FinishedWithError)
+    }
+
+    "send a TraceInfo when the trace has finished and all segments are finished and both contain tags" in {
+      Kamon.tracer.subscribe(testActor)
+
+      Tracer.withContext(newContext("simple-trace-without-segments", "awesome-token", Map("environment" → "production"))) {
+        Tracer.currentContext.startSegment("segment-one", "test-segment", "test", Map("segment-one-info" → "info")).finish()
+
Tracer.currentContext.startSegment("segment-two", "test-segment", "test", Map("segment-two-info" → "info")).finish() + Tracer.currentContext.finish() + } + + val traceInfo = expectMsgType[TraceInfo] + Kamon.tracer.unsubscribe(testActor) + + traceInfo.name should be("simple-trace-without-segments") + traceInfo.tags should be(Map("environment" → "production")) + traceInfo.segments.size should be(2) + + val segmentOne = traceInfo.segments.find(_.name == "segment-one") + val segmentTwo = traceInfo.segments.find(_.name == "segment-two") + + segmentOne.get.tags should be(Map("segment-one-info" → "info")) + segmentTwo.get.tags should be(Map("segment-two-info" → "info")) + } + + "send a TraceInfo when the trace has finished and all segments are finished and contains added tag" in { + Kamon.tracer.subscribe(testActor) + + Tracer.withContext(newContext("simple-trace-without-segments", "awesome-token")) { + Tracer.currentContext.addTag("environment", "production") + val segmentOne = Tracer.currentContext.startSegment("segment-one", "test-segment", "test") + segmentOne.addTag("segment-one-info", "info") + segmentOne.finish() + val segmentTwo = Tracer.currentContext.startSegment("segment-two", "test-segment", "test") + segmentTwo.addTag("segment-two-info", "info") + segmentTwo.finish() + Tracer.currentContext.finish() + } + + val traceInfo = expectMsgType[TraceInfo] + Kamon.tracer.unsubscribe(testActor) + + traceInfo.name should be("simple-trace-without-segments") + traceInfo.tags should be(Map("environment" → "production")) + traceInfo.segments.size should be(2) + + val segmentOne = traceInfo.segments.find(_.name == "segment-one") + val segmentTwo = traceInfo.segments.find(_.name == "segment-two") + + segmentOne.get.tags should be(Map("segment-one-info" → "info")) + segmentTwo.get.tags should be(Map("segment-two-info" → "info")) + } + + "incubate the tracing context if there are open segments after finishing" in { + Kamon.tracer.subscribe(testActor) + + val secondSegment = Tracer.withContext(newContext("simple-trace-without-segments")) { + Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish() + val segment = Tracer.currentContext.startSegment("segment-two", "test-segment", "test") + Tracer.currentContext.finish() + segment + } + + expectNoMsg(2 seconds) + secondSegment.finish() + + within(10 seconds) { + val traceInfo = expectMsgType[TraceInfo] + Kamon.tracer.unsubscribe(testActor) + + traceInfo.name should be("simple-trace-without-segments") + traceInfo.segments.size should be(2) + traceInfo.segments.find(_.name == "segment-one") should be('defined) + traceInfo.segments.find(_.name == "segment-two") should be('defined) + } + } + + } +} + +case class TraceException(message: String) extends RuntimeException(message) with NoStackTrace +case class SegmentException(message: String) extends RuntimeException(message) with NoStackTrace \ No newline at end of file diff --git a/kamon-core/src/legacy-test/scala/kamon/trace/TraceContextManipulationSpec.scala b/kamon-core/src/legacy-test/scala/kamon/trace/TraceContextManipulationSpec.scala new file mode 100644 index 00000000..d817186a --- /dev/null +++ b/kamon-core/src/legacy-test/scala/kamon/trace/TraceContextManipulationSpec.scala @@ -0,0 +1,113 @@ +/* + * ========================================================================================= + * Copyright © 2013-2015 the kamon project + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * ========================================================================================= + */ + +package kamon.trace + +import kamon.testkit.BaseKamonSpec + +class TraceContextManipulationSpec extends BaseKamonSpec("trace-metrics-spec") { + + "the TraceContext api" should { + "allow starting a trace within a specified block of code, and only within that block of code" in { + val createdContext = Tracer.withContext(newContext("start-context")) { + Tracer.currentContext should not be empty + Tracer.currentContext + } + + Tracer.currentContext shouldBe empty + createdContext.name shouldBe ("start-context") + } + + "allow starting a trace within a specified block of code, providing a trace-token and only within that block of code" in { + val createdContext = Tracer.withContext(newContext("start-context-with-token", "token-1")) { + Tracer.currentContext should not be empty + Tracer.currentContext + } + + Tracer.currentContext shouldBe empty + createdContext.name shouldBe ("start-context-with-token") + createdContext.token should be("token-1") + } + + "allow providing a TraceContext and make it available within a block of code" in { + val createdContext = newContext("manually-provided-trace-context") + + Tracer.currentContext shouldBe empty + Tracer.withContext(createdContext) { + Tracer.currentContext should be(createdContext) + } + + Tracer.currentContext shouldBe empty + } + + "allow renaming a trace" in { + val createdContext = Tracer.withContext(newContext("trace-before-rename")) { + Tracer.currentContext.rename("renamed-trace") + Tracer.currentContext + } + + Tracer.currentContext shouldBe empty + createdContext.name shouldBe "renamed-trace" + } + + "allow tagging and untagging a trace" in { + val createdContext = Tracer.withContext(newContext("trace-before-rename")) { + Tracer.currentContext.addTag("trace-tag", "tag-1") + Tracer.currentContext + } + + Tracer.currentContext shouldBe empty + createdContext.tags shouldBe Map("trace-tag" → "tag-1") + + createdContext.removeTag("trace-tag", "tag-1") + + createdContext.tags shouldBe Map.empty + } + + "allow creating a segment within a trace" in { + val createdContext = Tracer.withContext(newContext("trace-with-segments")) { + Tracer.currentContext.startSegment("segment-1", "segment-1-category", "segment-library") + Tracer.currentContext + } + + Tracer.currentContext shouldBe empty + createdContext.name shouldBe "trace-with-segments" + } + + "allow renaming a segment" in { + Tracer.withContext(newContext("trace-with-renamed-segment")) { + val segment = Tracer.currentContext.startSegment("original-segment-name", "segment-label", "segment-library") + segment.name should be("original-segment-name") + + segment.rename("new-segment-name") + segment.name should be("new-segment-name") + } + } + + "allow tagging and untagging a segment" in { + Tracer.withContext(newContext("trace-with-renamed-segment")) { + val segment = Tracer.currentContext.startSegment("segment-name", "segment-label", "segment-library") + segment.tags should be(Map.empty) + + segment.addTag("segment-tag", "tag-1") + segment.tags should be(Map("segment-tag" → "tag-1")) + 
+        segment.removeTag("segment-tag", "tag-1")
+        segment.tags should be(Map.empty)
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/kamon-core/src/legacy-test/scala/kamon/trace/TraceLocalSpec.scala b/kamon-core/src/legacy-test/scala/kamon/trace/TraceLocalSpec.scala
new file mode 100644
index 00000000..41d5bc83
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/trace/TraceLocalSpec.scala
@@ -0,0 +1,108 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.trace
+
+import kamon.testkit.BaseKamonSpec
+import kamon.trace.TraceLocal.AvailableToMdc
+import kamon.trace.logging.MdcKeysSupport
+import kamon.util.Supplier
+import org.scalatest.concurrent.PatienceConfiguration
+import org.scalatest.OptionValues
+import org.slf4j.MDC
+
+class TraceLocalSpec extends BaseKamonSpec("trace-local-spec") with PatienceConfiguration with OptionValues with MdcKeysSupport {
+  val SampleTraceLocalKeyAvailableToMDC = AvailableToMdc("someKey")
+
+  object SampleTraceLocalKey extends TraceLocal.TraceLocalKey[String]
+
+  "the TraceLocal storage" should {
+    "allow storing and retrieving values" in {
+      Tracer.withContext(newContext("store-and-retrieve-trace-local")) {
+        val testString = "Hello World"
+
+        TraceLocal.store(SampleTraceLocalKey)(testString)
+        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
+      }
+    }
+
+    "return None when retrieving a non-existent key" in {
+      Tracer.withContext(newContext("non-existent-key")) {
+        TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
+      }
+    }
+
+    "throw an exception when trying to get a non-existent key" in {
+      Tracer.withContext(newContext("non-existent-key")) {
+        intercept[NoSuchElementException] {
+          TraceLocal.get(SampleTraceLocalKey)
+        }
+      }
+    }
+
+    "return the given value when retrieving a non-existent key" in {
+      Tracer.withContext(newContext("non-existent-key")) {
+        TraceLocal.getOrElse(SampleTraceLocalKey, new Supplier[String] { def get = "optionalValue" }) should equal("optionalValue")
+      }
+    }
+
+    "return None when retrieving a key without a current TraceContext" in {
+      TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
+    }
+
+    "be attached to the TraceContext when it is propagated" in {
+      val testString = "Hello World"
+      val testContext = Tracer.withContext(newContext("manually-propagated-trace-local")) {
+        TraceLocal.store(SampleTraceLocalKey)(testString)
+        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
+        Tracer.currentContext
+      }
+
+      /** No TraceLocal should be available here */
+      TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
+
+      Tracer.withContext(testContext) {
+        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
+      }
+    }
+
+    "allow retrieving a value from the MDC when a key was created with
AvailableToMdc(cool-key)" in {
+      Tracer.withContext(newContext("store-and-retrieve-trace-local-and-copy-to-mdc")) {
+        val testString = "Hello MDC"
+
+        TraceLocal.store(SampleTraceLocalKeyAvailableToMDC)(testString)
+        TraceLocal.retrieve(SampleTraceLocalKeyAvailableToMDC).value should equal(testString)
+
+        withMdc {
+          MDC.get("someKey") should equal(testString)
+        }
+      }
+    }
+
+    "allow retrieving a value from the MDC when a key was created with AvailableToMdc.storeForMdc(String, String)" in {
+      Tracer.withContext(newContext("store-and-retrieve-trace-local-and-copy-to-mdc")) {
+        val testString = "Hello MDC"
+
+        TraceLocal.storeForMdc("someKey", testString)
+        TraceLocal.retrieve(SampleTraceLocalKeyAvailableToMDC).value should equal(testString)
+
+        withMdc {
+          MDC.get("someKey") should equal(testString)
+        }
+      }
+    }
+  }
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala b/kamon-core/src/legacy-test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala
new file mode 100644
index 00000000..39374e79
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala
@@ -0,0 +1,44 @@
+package kamon.trace.logging
+
+import kamon.testkit.BaseKamonSpec
+import kamon.trace.{EmptyTraceContext, Tracer}
+import org.slf4j.MDC
+
+class MdcKeysSupportSpec extends BaseKamonSpec("mdc-keys-support-spec") {
+
+  "Running code with MDC support" should {
+    "add nothing to the MDC" when {
+      "the trace context is empty" in {
+        // Given an empty trace context.
+        Tracer.withContext(EmptyTraceContext) {
+          // When some code is executed with MDC support.
+          MdcKeysSupport.withMdc {
+            // Then the MDC should not contain the trace token.
+            Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(None)
+            // Or name
+            Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(None)
+          }
+        }
+      }
+    }
+    "add the trace token and name to the MDC" when {
+      "the trace context is not empty" in {
+        // Given a trace context.
+        Tracer.withNewContext("name", Some("token")) {
+          // When some code is executed with MDC support.
+          MdcKeysSupport.withMdc {
+            // Then the MDC should contain the trace token.
+            Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(Some("token"))
+            // And name
+            Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(Some("name"))
+          }
+
+          // Then after code is executed the MDC should have been cleared.
+          Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(None)
+          Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(None)
+        }
+      }
+    }
+  }
+
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/util/GlobPathFilterSpec.scala b/kamon-core/src/legacy-test/scala/kamon/util/GlobPathFilterSpec.scala
new file mode 100644
index 00000000..7d585087
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/util/GlobPathFilterSpec.scala
@@ -0,0 +1,73 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2014 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import org.scalatest.{Matchers, WordSpecLike}
+
+class GlobPathFilterSpec extends WordSpecLike with Matchers {
+  "The GlobPathFilter" should {
+
+    "match a single expression" in {
+      val filter = new GlobPathFilter("/user/actor")
+
+      filter.accept("/user/actor") shouldBe true
+
+      filter.accept("/user/actor/something") shouldBe false
+      filter.accept("/user/actor/somethingElse") shouldBe false
+    }
+
+    "match all expressions in the same level" in {
+      val filter = new GlobPathFilter("/user/*")
+
+      filter.accept("/user/actor") shouldBe true
+      filter.accept("/user/otherActor") shouldBe true
+
+      filter.accept("/user/something/actor") shouldBe false
+      filter.accept("/user/something/otherActor") shouldBe false
+    }
+
+    "match all expressions across all levels" in {
+      val filter = new GlobPathFilter("**")
+
+      filter.accept("GET: /ping") shouldBe true
+      filter.accept("GET: /ping/pong") shouldBe true
+    }
+
+    "match all expressions crossing the path boundaries" in {
+      val filter = new GlobPathFilter("/user/actor-**")
+
+      filter.accept("/user/actor-") shouldBe true
+      filter.accept("/user/actor-one") shouldBe true
+      filter.accept("/user/actor-one/other") shouldBe true
+
+      filter.accept("/user/something/actor") shouldBe false
+      filter.accept("/user/something/otherActor") shouldBe false
+    }
+
+    "match exactly one character" in {
+      val filter = new GlobPathFilter("/user/actor-?")
+
+      filter.accept("/user/actor-1") shouldBe true
+      filter.accept("/user/actor-2") shouldBe true
+      filter.accept("/user/actor-3") shouldBe true
+
+      filter.accept("/user/actor-one") shouldBe false
+      filter.accept("/user/actor-two") shouldBe false
+      filter.accept("/user/actor-tree") shouldBe false
+    }
+  }
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/util/NeedToScaleSpec.scala b/kamon-core/src/legacy-test/scala/kamon/util/NeedToScaleSpec.scala
new file mode 100644
index 00000000..cba6ad98
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/util/NeedToScaleSpec.scala
@@ -0,0 +1,67 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import com.typesafe.config.ConfigFactory
+import kamon.metric.instrument.{Memory, Time}
+import org.scalatest.{Matchers, WordSpec}
+
+class NeedToScaleSpec extends WordSpec with Matchers {
+
+  "NeedToScale" should {
+    "extract the time unit to scale to from config" in {
+      val config = ConfigFactory.parseString(
+        """
+          |time-units = "ms"
+        """.stripMargin
+      )
+
+      config match {
+        case NeedToScale(timeUnits, memoryUnits) ⇒
+          timeUnits should be(Some(Time.Milliseconds))
+          memoryUnits should be(None)
+      }
+    }
+    "extract the memory unit to scale to from config" in {
+      val config = ConfigFactory.parseString(
+        """
+          |memory-units = "kb"
+        """.stripMargin
+      )
+
+      config match {
+        case NeedToScale(timeUnits, memoryUnits) ⇒
+          timeUnits should be(None)
+          memoryUnits should be(Some(Memory.KiloBytes))
+      }
+    }
+    "extract nothing if the config has none of the expected keys" in {
+      val config = ConfigFactory.parseString(
+        """
+          |some-other-key = "value"
+        """.stripMargin
+      )
+
+      config match {
+        case NeedToScale(timeUnits, memoryUnits) ⇒
+          fail("Should not match")
+        case _ ⇒
+      }
+    }
+  }
+
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/util/RegexPathFilterSpec.scala b/kamon-core/src/legacy-test/scala/kamon/util/RegexPathFilterSpec.scala
new file mode 100644
index 00000000..a2cc8629
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/util/RegexPathFilterSpec.scala
@@ -0,0 +1,59 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util
+
+import org.scalatest.{Matchers, WordSpecLike}
+
+class RegexPathFilterSpec extends WordSpecLike with Matchers {
+  "The RegexPathFilter" should {
+
+    "match a single expression" in {
+      val filter = new RegexPathFilter("/user/actor")
+
+      filter.accept("/user/actor") shouldBe true
+
+      filter.accept("/user/actor/something") shouldBe false
+      filter.accept("/user/actor/somethingElse") shouldBe false
+    }
+
+    "match arbitrary expressions ending with a wildcard" in {
+      val filter = new RegexPathFilter("/user/.*")
+
+      filter.accept("/user/actor") shouldBe true
+      filter.accept("/user/otherActor") shouldBe true
+      filter.accept("/user/something/actor") shouldBe true
+      filter.accept("/user/something/otherActor") shouldBe true
+
+      filter.accept("/otheruser/actor") shouldBe false
+      filter.accept("/otheruser/otherActor") shouldBe false
+      filter.accept("/otheruser/something/actor") shouldBe false
+      filter.accept("/otheruser/something/otherActor") shouldBe false
+    }
+
+    "match numbers" in {
+      val filter = new RegexPathFilter("/user/actor-\\d")
+
+      filter.accept("/user/actor-1") shouldBe true
+      filter.accept("/user/actor-2") shouldBe true
+      filter.accept("/user/actor-3") shouldBe true
+
+      filter.accept("/user/actor-one") shouldBe false
+      filter.accept("/user/actor-two") shouldBe false
+      filter.accept("/user/actor-tree") shouldBe false
+    }
+  }
+}
diff --git a/kamon-core/src/legacy-test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala b/kamon-core/src/legacy-test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala
new file mode 100644
index 00000000..4e5394f8
--- /dev/null
+++ b/kamon-core/src/legacy-test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala
@@ -0,0 +1,75 @@
+/*
+ * =========================================================================================
+ * Copyright © 2013-2015 the kamon project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+ * except in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the
+ * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+ * either express or implied. See the License for the specific language governing permissions
+ * and limitations under the License.
+ * =========================================================================================
+ */
+
+package kamon.util.executors
+
+import java.util.concurrent.Executors
+
+import kamon.Kamon
+import kamon.metric.{Entity, EntityRecorder}
+import kamon.testkit.BaseKamonSpec
+
+class ExecutorServiceMetricsSpec extends BaseKamonSpec("executor-service-metrics-spec") {
+
+  "the ExecutorServiceMetrics" should {
+    "register a SingleThreadPool, collect its metrics and remove it" in {
+      val singleThreadPoolExecutor = Executors.newSingleThreadExecutor()
+      val singleThreadPoolExecutorEntity = ExecutorServiceMetrics.register("single-thread-pool", singleThreadPoolExecutor)
+      findExecutorRecorder(singleThreadPoolExecutorEntity) should not be empty
+
+      ExecutorServiceMetrics.remove(singleThreadPoolExecutorEntity)
+      findExecutorRecorder(singleThreadPoolExecutorEntity) should be(empty)
+    }
+
+    "register a ThreadPoolExecutor, collect its metrics and remove it" in {
+      val threadPoolExecutor = Executors.newCachedThreadPool()
+      val threadPoolExecutorEntity = ExecutorServiceMetrics.register("thread-pool-executor", threadPoolExecutor)
+      findExecutorRecorder(threadPoolExecutorEntity) should not be empty
+
+      ExecutorServiceMetrics.remove(threadPoolExecutorEntity)
+      findExecutorRecorder(threadPoolExecutorEntity) should be(empty)
+    }
+
+    "register a ScheduledThreadPoolExecutor, collect its metrics and remove it" in {
+      val scheduledThreadPoolExecutor = Executors.newSingleThreadScheduledExecutor()
+      val scheduledThreadPoolEntity = ExecutorServiceMetrics.register("scheduled-thread-pool-executor", scheduledThreadPoolExecutor)
+      findExecutorRecorder(scheduledThreadPoolEntity) should not be empty
+
+      ExecutorServiceMetrics.remove(scheduledThreadPoolEntity)
+      findExecutorRecorder(scheduledThreadPoolEntity) should be(empty)
+    }
+
+    "register a Java ForkJoinPool, collect its metrics and remove it" in {
+      val javaForkJoinPool = Executors.newWorkStealingPool()
+      val javaForkJoinPoolEntity = ExecutorServiceMetrics.register("java-fork-join-pool", javaForkJoinPool)
+      findExecutorRecorder(javaForkJoinPoolEntity) should not be empty
+
+      ExecutorServiceMetrics.remove(javaForkJoinPoolEntity)
+      findExecutorRecorder(javaForkJoinPoolEntity) should be(empty)
+    }
+
+    "register a Scala ForkJoinPool, collect its metrics and remove it" in {
+      val scalaForkJoinPool = new scala.concurrent.forkjoin.ForkJoinPool()
+      val scalaForkJoinPoolEntity = ExecutorServiceMetrics.register("scala-fork-join-pool", scalaForkJoinPool)
+      findExecutorRecorder(scalaForkJoinPoolEntity) should not be empty
+
+      ExecutorServiceMetrics.remove(scalaForkJoinPoolEntity)
+      findExecutorRecorder(scalaForkJoinPoolEntity) should be(empty)
+    }
+
+    def findExecutorRecorder(entity: Entity): Option[EntityRecorder] = Kamon.metrics.find(entity)
+  }
+}
diff --git a/kamon-core/src/main/java/kamon/jsr166/LongAdder.java b/kamon-core/src/main/java/kamon/jsr166/LongAdder.java
deleted file mode 100644
index 7e47ae63..00000000
--- a/kamon-core/src/main/java/kamon/jsr166/LongAdder.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
-
-Note: this was copied from Doug Lea's CVS repository
-      http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/
-
-LongAdder.java version 1.14
-
-*/
-
-
-/*
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package kamon.jsr166;
-
-import java.io.Serializable;
-
-/**
- * One or more variables that together
maintain an initially zero - * {@code long} sum. When updates (method {@link #add}) are contended - * across threads, the set of variables may grow dynamically to reduce - * contention. Method {@link #sum} (or, equivalently, {@link - * #longValue}) returns the current total combined across the - * variables maintaining the sum. - * - *
<p>
This class is usually preferable to {@link java.util.concurrent.atomic.AtomicLong} when - * multiple threads update a common sum that is used for purposes such - * as collecting statistics, not for fine-grained synchronization - * control. Under low update contention, the two classes have similar - * characteristics. But under high contention, expected throughput of - * this class is significantly higher, at the expense of higher space - * consumption. - * - *
<p>
This class extends {@link Number}, but does not define - * methods such as {@code equals}, {@code hashCode} and {@code - * compareTo} because instances are expected to be mutated, and so are - * not useful as collection keys. - * - *
<p>
jsr166e note: This class is targeted to be placed in - * java.util.concurrent.atomic. - * - * @since 1.8 - * @author Doug Lea - */ -public class LongAdder extends Striped64 implements Serializable { - private static final long serialVersionUID = 7249069246863182397L; - - /** - * Version of plus for use in retryUpdate - */ - final long fn(long v, long x) { return v + x; } - - /** - * Creates a new adder with initial sum of zero. - */ - public LongAdder() { - } - - /** - * Adds the given value. - * - * @param x the value to add - */ - public void add(long x) { - Cell[] as; long b, v; HashCode hc; Cell a; int n; - if ((as = cells) != null || !casBase(b = base, b + x)) { - boolean uncontended = true; - int h = (hc = threadHashCode.get()).code; - if (as == null || (n = as.length) < 1 || - (a = as[(n - 1) & h]) == null || - !(uncontended = a.cas(v = a.value, v + x))) - retryUpdate(x, hc, uncontended); - } - } - - /** - * Equivalent to {@code add(1)}. - */ - public void increment() { - add(1L); - } - - /** - * Equivalent to {@code add(-1)}. - */ - public void decrement() { - add(-1L); - } - - /** - * Returns the current sum. The returned value is NOT an - * atomic snapshot; invocation in the absence of concurrent - * updates returns an accurate result, but concurrent updates that - * occur while the sum is being calculated might not be - * incorporated. - * - * @return the sum - */ - public long sum() { - long sum = base; - Cell[] as = cells; - if (as != null) { - int n = as.length; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null) - sum += a.value; - } - } - return sum; - } - - /** - * Resets variables maintaining the sum to zero. This method may - * be a useful alternative to creating a new adder, but is only - * effective if there are no concurrent updates. Because this - * method is intrinsically racy, it should only be used when it is - * known that no threads are concurrently updating. - */ - public void reset() { - internalReset(0L); - } - - /** - * Equivalent in effect to {@link #sum} followed by {@link - * #reset}. This method may apply for example during quiescent - * points between multithreaded computations. If there are - * updates concurrent with this method, the returned value is - * not guaranteed to be the final value occurring before - * the reset. - * - * @return the sum - */ - public long sumThenReset() { - long sum = base; - Cell[] as = cells; - base = 0L; - if (as != null) { - int n = as.length; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null) { - sum += a.value; - a.value = 0L; - } - } - } - return sum; - } - - public long sumAndReset() { - long sum = getAndSetBase(0L); - Cell[] as = cells; - if (as != null) { - int n = as.length; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null) { - sum += a.getAndSet(0L); - } - } - } - return sum; - } - - /** - * Returns the String representation of the {@link #sum}. - * @return the String representation of the {@link #sum} - */ - public String toString() { - return Long.toString(sum()); - } - - /** - * Equivalent to {@link #sum}. - * - * @return the sum - */ - public long longValue() { - return sum(); - } - - /** - * Returns the {@link #sum} as an {@code int} after a narrowing - * primitive conversion. - */ - public int intValue() { - return (int)sum(); - } - - /** - * Returns the {@link #sum} as a {@code float} - * after a widening primitive conversion. 
- */ - public float floatValue() { - return (float)sum(); - } - - /** - * Returns the {@link #sum} as a {@code double} after a widening - * primitive conversion. - */ - public double doubleValue() { - return (double)sum(); - } - - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - s.defaultWriteObject(); - s.writeLong(sum()); - } - - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - busy = 0; - cells = null; - base = s.readLong(); - } - -} diff --git a/kamon-core/src/main/java/kamon/jsr166/LongMaxUpdater.java b/kamon-core/src/main/java/kamon/jsr166/LongMaxUpdater.java deleted file mode 100644 index fc9ea4e5..00000000 --- a/kamon-core/src/main/java/kamon/jsr166/LongMaxUpdater.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package kamon.jsr166; -import java.io.Serializable; - -/** - * One or more variables that together maintain a running {@code long} - * maximum with initial value {@code Long.MIN_VALUE}. When updates - * (method {@link #update}) are contended across threads, the set of - * variables may grow dynamically to reduce contention. Method {@link - * #max} (or, equivalently, {@link #longValue}) returns the current - * maximum across the variables maintaining updates. - * - *

This class extends {@link Number}, but does not define - * methods such as {@code equals}, {@code hashCode} and {@code - * compareTo} because instances are expected to be mutated, and so are - * not useful as collection keys. - * - *

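 - * Usage sketch (an editor's illustration, not part of the original javadoc; the variable
 - * names are hypothetical):
 - *
 - * <pre> {@code
 - * LongMaxUpdater maxLatency = new LongMaxUpdater();
 - * // from any number of threads:
 - * maxLatency.update(elapsedNanos);
 - * // later, from a reporting thread:
 - * long observedMax = maxLatency.max();
 - * }</pre>
 - *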
jsr166e note: This class is targeted to be placed in - * java.util.concurrent.atomic. - * - * @since 1.8 - * @author Doug Lea - */ -public class LongMaxUpdater extends Striped64 implements Serializable { - private static final long serialVersionUID = 7249069246863182397L; - - /** - * Version of max for use in retryUpdate - */ - final long fn(long v, long x) { return v > x ? v : x; } - - /** - * Creates a new instance with initial maximum of {@code - * Long.MIN_VALUE}. - */ - public LongMaxUpdater() { - base = Long.MIN_VALUE; - } - - /** - * Creates a new instance with the given initialValue - */ - public LongMaxUpdater(long initialValue) { - base = initialValue; - } - - - /** - * Updates the maximum to be at least the given value. - * - * @param x the value to update - */ - public void update(long x) { - Cell[] as; long b, v; HashCode hc; Cell a; int n; - if ((as = cells) != null || - (b = base) < x && !casBase(b, x)) { - boolean uncontended = true; - int h = (hc = threadHashCode.get()).code; - if (as == null || (n = as.length) < 1 || - (a = as[(n - 1) & h]) == null || - ((v = a.value) < x && !(uncontended = a.cas(v, x)))) - retryUpdate(x, hc, uncontended); - } - } - - /** - * Returns the current maximum. The returned value is - * NOT an atomic snapshot; invocation in the absence of - * concurrent updates returns an accurate result, but concurrent - * updates that occur while the value is being calculated might - * not be incorporated. - * - * @return the maximum - */ - public long max() { - Cell[] as = cells; - long max = base; - if (as != null) { - int n = as.length; - long v; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null && (v = a.value) > max) - max = v; - } - } - return max; - } - - /** - * Resets variables maintaining updates to {@code Long.MIN_VALUE}. - * This method may be a useful alternative to creating a new - * updater, but is only effective if there are no concurrent - * updates. Because this method is intrinsically racy, it should - * only be used when it is known that no threads are concurrently - * updating. - */ - public void reset() { - internalReset(Long.MIN_VALUE); - } - - /** - * Equivalent in effect to {@link #max} followed by {@link - * #reset}. This method may apply for example during quiescent - * points between multithreaded computations. If there are - * updates concurrent with this method, the returned value is - * not guaranteed to be the final value occurring before - * the reset. - * - * @return the maximum - */ - public long maxThenReset(long newValue) { - Cell[] as = cells; - long max = base; - base = newValue; - if (as != null) { - int n = as.length; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null) { - long v = a.value; - a.value = newValue; - if (v > max) - max = v; - } - } - } - return max; - } - - /** - * Returns the String representation of the {@link #max}. - * @return the String representation of the {@link #max} - */ - public String toString() { - return Long.toString(max()); - } - - /** - * Equivalent to {@link #max}. - * - * @return the maximum - */ - public long longValue() { - return max(); - } - - /** - * Returns the {@link #max} as an {@code int} after a narrowing - * primitive conversion. - */ - public int intValue() { - return (int)max(); - } - - /** - * Returns the {@link #max} as a {@code float} - * after a widening primitive conversion. - */ - public float floatValue() { - return (float)max(); - } - - /** - * Returns the {@link #max} as a {@code double} after a widening - * primitive conversion. 
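 - * With the default constructor and no updates yet, this returns {@code Long.MIN_VALUE}
 - * widened to {@code double} (editor's note).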
- */ - public double doubleValue() { - return (double)max(); - } - - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - s.defaultWriteObject(); - s.writeLong(max()); - } - - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - busy = 0; - cells = null; - base = s.readLong(); - } - -} diff --git a/kamon-core/src/main/java/kamon/jsr166/Striped64.java b/kamon-core/src/main/java/kamon/jsr166/Striped64.java deleted file mode 100644 index 8fbfa4ba..00000000 --- a/kamon-core/src/main/java/kamon/jsr166/Striped64.java +++ /dev/null @@ -1,371 +0,0 @@ -/* - -Note: this was copied from Doug Lea's CVS repository - http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ - -Striped64.java version 1.8 - -*/ - - -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package kamon.jsr166; - -import java.util.Random; - -/** - * A package-local class holding common representation and mechanics - * for classes supporting dynamic striping on 64bit values. The class - * extends Number so that concrete subclasses must publicly do so. - */ -abstract class Striped64 extends Number { - /* - * This class maintains a lazily-initialized table of atomically - * updated variables, plus an extra "base" field. The table size - * is a power of two. Indexing uses masked per-thread hash codes. - * Nearly all declarations in this class are package-private, - * accessed directly by subclasses. - * - * Table entries are of class Cell; a variant of AtomicLong padded - * to reduce cache contention on most processors. Padding is - * overkill for most Atomics because they are usually irregularly - * scattered in memory and thus don't interfere much with each - * other. But Atomic objects residing in arrays will tend to be - * placed adjacent to each other, and so will most often share - * cache lines (with a huge negative performance impact) without - * this precaution. - * - * In part because Cells are relatively large, we avoid creating - * them until they are needed. When there is no contention, all - * updates are made to the base field. Upon first contention (a - * failed CAS on base update), the table is initialized to size 2. - * The table size is doubled upon further contention until - * reaching the nearest power of two greater than or equal to the - * number of CPUS. Table slots remain empty (null) until they are - * needed. - * - * A single spinlock ("busy") is used for initializing and - * resizing the table, as well as populating slots with new Cells. - * There is no need for a blocking lock; when the lock is not - * available, threads try other slots (or the base). During these - * retries, there is increased contention and reduced locality, - * which is still better than alternatives. - * - * Per-thread hash codes are initialized to random values. - * Contention and/or table collisions are indicated by failed - * CASes when performing an update operation (see method - * retryUpdate). Upon a collision, if the table size is less than - * the capacity, it is doubled in size unless some other thread - * holds the lock. If a hashed slot is empty, and lock is - * available, a new Cell is created. Otherwise, if the slot - * exists, a CAS is tried. 
Retries proceed by "double hashing", - * using a secondary hash (Marsaglia XorShift) to try to find a - * free slot. - * - * The table size is capped because, when there are more threads - * than CPUs, supposing that each thread were bound to a CPU, - * there would exist a perfect hash function mapping threads to - * slots that eliminates collisions. When we reach capacity, we - * search for this mapping by randomly varying the hash codes of - * colliding threads. Because search is random, and collisions - * only become known via CAS failures, convergence can be slow, - * and because threads are typically not bound to CPUS forever, - * may not occur at all. However, despite these limitations, - * observed contention rates are typically low in these cases. - * - * It is possible for a Cell to become unused when threads that - * once hashed to it terminate, as well as in the case where - * doubling the table causes no thread to hash to it under - * expanded mask. We do not try to detect or remove such cells, - * under the assumption that for long-running instances, observed - * contention levels will recur, so the cells will eventually be - * needed again; and for short-lived ones, it does not matter. - */ - - /** - * Padded variant of AtomicLong supporting only raw accesses plus CAS. - * The value field is placed between pads, hoping that the JVM doesn't - * reorder them. - * - * JVM intrinsics note: It would be possible to use a release-only - * form of CAS here, if it were provided. - */ - static final class Cell { - volatile long p0, p1, p2, p3, p4, p5, p6; - volatile long value; - volatile long q0, q1, q2, q3, q4, q5, q6; - Cell(long x) { value = x; } - - final boolean cas(long cmp, long val) { - return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); - } - - final long getAndSet(long val) { - long v; - do { - v = UNSAFE.getLongVolatile(this, valueOffset); - } while (!UNSAFE.compareAndSwapLong(this, valueOffset, v, val)); - return v; - } - - // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long valueOffset; - static { - try { - UNSAFE = getUnsafe(); - Class ak = Cell.class; - valueOffset = UNSAFE.objectFieldOffset - (ak.getDeclaredField("value")); - } catch (Exception e) { - throw new Error(e); - } - } - - } - - /** - * Holder for the thread-local hash code. The code is initially - * random, but may be set to a different value upon collisions. - */ - static final class HashCode { - static final Random rng = new Random(); - int code; - HashCode() { - int h = rng.nextInt(); // Avoid zero to allow xorShift rehash - code = (h == 0) ? 1 : h; - } - } - - /** - * The corresponding ThreadLocal class - */ - static final class ThreadHashCode extends ThreadLocal { - public HashCode initialValue() { return new HashCode(); } - } - - /** - * Static per-thread hash codes. Shared across all instances to - * reduce ThreadLocal pollution and because adjustments due to - * collisions in one table are likely to be appropriate for - * others. - */ - static final ThreadHashCode threadHashCode = new ThreadHashCode(); - - /** Number of CPUS, to place bound on table size */ - static final int NCPU = Runtime.getRuntime().availableProcessors(); - - /** - * Table of cells. When non-null, size is a power of 2. - */ - transient volatile Cell[] cells; - - /** - * Base value, used mainly when there is no contention, but also as - * a fallback during table initialization races. Updated via CAS. 
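 - * Subclasses may start it at a different value; for example, {@code LongMaxUpdater}
 - * initializes it to {@code Long.MIN_VALUE} (editor's note).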
- */ - transient volatile long base; - - /** - * Spinlock (locked via CAS) used when resizing and/or creating Cells. - */ - transient volatile int busy; - - /** - * Package-private default constructor - */ - Striped64() { - } - - /** - * CASes the base field. - */ - final boolean casBase(long cmp, long val) { - return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); - } - - /** - * CASes the base field. - */ - final long getAndSetBase(long val) { - long v; - do { - v = UNSAFE.getLongVolatile(this, baseOffset); - } while (!UNSAFE.compareAndSwapLong(this, baseOffset, v, val)); - return v; - } - - /** - * CASes the busy field from 0 to 1 to acquire lock. - */ - final boolean casBusy() { - return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); - } - - /** - * Computes the function of current and new value. Subclasses - * should open-code this update function for most uses, but the - * virtualized form is needed within retryUpdate. - * - * @param currentValue the current value (of either base or a cell) - * @param newValue the argument from a user update call - * @return result of the update function - */ - abstract long fn(long currentValue, long newValue); - - /** - * Handles cases of updates involving initialization, resizing, - * creating new Cells, and/or contention. See above for - * explanation. This method suffers the usual non-modularity - * problems of optimistic retry code, relying on rechecked sets of - * reads. - * - * @param x the value - * @param hc the hash code holder - * @param wasUncontended false if CAS failed before call - */ - final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { - int h = hc.code; - boolean collide = false; // True if last slot nonempty - for (;;) { - Cell[] as; Cell a; int n; long v; - if ((as = cells) != null && (n = as.length) > 0) { - if ((a = as[(n - 1) & h]) == null) { - if (busy == 0) { // Try to attach new Cell - Cell r = new Cell(x); // Optimistically create - if (busy == 0 && casBusy()) { - boolean created = false; - try { // Recheck under lock - Cell[] rs; int m, j; - if ((rs = cells) != null && - (m = rs.length) > 0 && - rs[j = (m - 1) & h] == null) { - rs[j] = r; - created = true; - } - } finally { - busy = 0; - } - if (created) - break; - continue; // Slot is now non-empty - } - } - collide = false; - } - else if (!wasUncontended) // CAS already known to fail - wasUncontended = true; // Continue after rehash - else if (a.cas(v = a.value, fn(v, x))) - break; - else if (n >= NCPU || cells != as) - collide = false; // At max size or stale - else if (!collide) - collide = true; - else if (busy == 0 && casBusy()) { - try { - if (cells == as) { // Expand table unless stale - Cell[] rs = new Cell[n << 1]; - for (int i = 0; i < n; ++i) - rs[i] = as[i]; - cells = rs; - } - } finally { - busy = 0; - } - collide = false; - continue; // Retry with expanded table - } - h ^= h << 13; // Rehash - h ^= h >>> 17; - h ^= h << 5; - } - else if (busy == 0 && cells == as && casBusy()) { - boolean init = false; - try { // Initialize table - if (cells == as) { - Cell[] rs = new Cell[2]; - rs[h & 1] = new Cell(x); - cells = rs; - init = true; - } - } finally { - busy = 0; - } - if (init) - break; - } - else if (casBase(v = base, fn(v, x))) - break; // Fall back on using base - } - hc.code = h; // Record index for next time - } - - - /** - * Sets base and all cells to the given value. 
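 - * Like the public {@code reset()} methods built on top of it, this is inherently racy:
 - * cell values are written without CAS, so it is only reliable when no other threads are
 - * updating (editor's note).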
- */ - final void internalReset(long initialValue) { - Cell[] as = cells; - base = initialValue; - if (as != null) { - int n = as.length; - for (int i = 0; i < n; ++i) { - Cell a = as[i]; - if (a != null) - a.value = initialValue; - } - } - } - - // Unsafe mechanics - private static final sun.misc.Unsafe UNSAFE; - private static final long baseOffset; - private static final long busyOffset; - static { - try { - UNSAFE = getUnsafe(); - Class sk = Striped64.class; - baseOffset = UNSAFE.objectFieldOffset - (sk.getDeclaredField("base")); - busyOffset = UNSAFE.objectFieldOffset - (sk.getDeclaredField("busy")); - } catch (Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. - * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - try { - return sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) {} - try { - return java.security.AccessController.doPrivileged - (new java.security.PrivilegedExceptionAction() { - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) - return k.cast(x); - } - throw new NoSuchFieldError("the Unsafe"); - }}); - } catch (java.security.PrivilegedActionException e) { - throw new RuntimeException("Could not initialize intrinsics", - e.getCause()); - } - } -} diff --git a/kamon-core/src/main/java/kamon/util/GlobPathFilter.java b/kamon-core/src/main/java/kamon/util/GlobPathFilter.java deleted file mode 100644 index ac5d08a6..00000000 --- a/kamon-core/src/main/java/kamon/util/GlobPathFilter.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * ========================================================================================= - * Copyright 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util; - -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Default implementation of PathFilter. Uses glob based includes and excludes to determine whether to export. - * - * @author John E. Bailey - * @author David M. Lloyd - */ -public final class GlobPathFilter implements PathFilter { - private static final Pattern GLOB_PATTERN = Pattern.compile("(\\*\\*?)|(\\?)|(\\\\.)|(/+)|([^*?]+)"); - - private final String glob; - private final Pattern pattern; - - /** - * Construct a new instance. - * - * @param glob the path glob to match - */ - public GlobPathFilter(final String glob) { - pattern = getGlobPattern(glob); - this.glob = glob; - } - - /** - * Determine whether a path should be accepted. 
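 - *
 - * For example (editor's illustration), a filter built from {@code kamon/**} accepts
 - * {@code kamon/metric/Entity} as well as {@code kamon/Kamon}.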
 - *
 - * @param path the path to check
 - * @return true if the path should be accepted, false if not
 - */
 - public boolean accept(final String path) {
 - return pattern.matcher(path).matches();
 - }
 -
 - /**
 - * Get a regular expression pattern which accepts any path names which match the given glob. The glob patterns
 - * function similarly to {@code ant} file patterns. Valid metacharacters in the glob pattern include:
 - * <ul>
 - *  <li>{@code "*"} - matches any number of non-slash characters</li>
 - *  <li>{@code "**"} - matches any number of characters, including slashes</li>
 - *  <li>{@code "?"} - matches exactly one non-slash character</li>
 - *  <li>{@code "\"} - escapes the character that follows it</li>
 - * </ul>
 - *
- * In addition, any glob pattern matches all subdirectories thereof. A glob pattern ending in {@code /} is equivalent - * to a glob pattern ending in /** in that the named directory is not itself included in the glob. - *

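 - * For example (editor's illustration), the glob {@code foo/*.conf} accepts {@code foo/app.conf}
 - * but not {@code foo/bar/app.conf}, whereas {@code foo/**} accepts both.
 - *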
 - * See also: "Patterns" in the Ant Manual
 - *
 - * @param glob the glob to match
 - *
 - * @return the pattern
 - */
 - private static Pattern getGlobPattern(final String glob) {
 - StringBuilder patternBuilder = new StringBuilder();
 - final Matcher m = GLOB_PATTERN.matcher(glob);
 - boolean lastWasSlash = false;
 - while (m.find()) {
 - lastWasSlash = false;
 - String grp;
 - if ((grp = m.group(1)) != null) {
 - // match a * or **
 - if (grp.length() == 2) {
 - // it's a "**"
 - patternBuilder.append(".*");
 - } else {
 - // it's a "*"
 - patternBuilder.append("[^/]*");
 - }
 - } else if ((grp = m.group(2)) != null) {
 - // match a '?' glob pattern; any non-slash character
 - patternBuilder.append("[^/]");
 - } else if ((grp = m.group(3)) != null) {
 - // backslash-escaped value
 - patternBuilder.append(Pattern.quote(m.group().substring(1)));
 - } else if ((grp = m.group(4)) != null) {
 - // match any number of / chars
 - patternBuilder.append("/+");
 - lastWasSlash = true;
 - } else {
 - // some other string
 - patternBuilder.append(Pattern.quote(m.group()));
 - }
 - }
 - if (lastWasSlash) {
 - // ends in /, append **
 - patternBuilder.append(".*");
 - }
 - return Pattern.compile(patternBuilder.toString());
 - }
 -}
diff --git a/kamon-core/src/main/resources/META-INF/aop.xml b/kamon-core/src/main/resources/META-INF/aop.xml
deleted file mode 100644
index b13f9aac..00000000
--- a/kamon-core/src/main/resources/META-INF/aop.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/kamon-core/src/main/resources/reference.conf b/kamon-core/src/main/resources/reference.conf
deleted file mode 100644
index 48441493..00000000
--- a/kamon-core/src/main/resources/reference.conf
+++ /dev/null
@@ -1,184 +0,0 @@
-# ================================== #
-# Kamon-Core Reference Configuration #
-# ================================== #
-
-kamon {
- metric {
-
- # Time interval for collecting all metrics and sending the snapshots to all subscribed actors.
- tick-interval = 10 seconds
-
- # Default size for the LongBuffer that gets allocated for metrics collection and merge. The
- # value should correspond to the highest number of different buckets with values that might
- # exist in a single histogram during a metrics collection. The default value of 33792 is a
- # very conservative value and it's equal to the total number of buckets required to cover values
- # from 1 nanosecond to 1 hour with 0.1% precision (3 significant value digits). That means
- # that you would need to have at least one measurement on every bucket of a single histogram to
- # fully utilize this buffer, which is *really* unlikely to ever happen. Since the buffer should
- # be allocated once and reused it shouldn't impose a memory footprint issue.
- default-collection-context-buffer-size = 33792
-
- # Disables a big error message that will typically be logged if your application wasn't started
- # with the -javaagent:/path-to-aspectj-weaver.jar option. If you are only using KamonStandalone
- # it might be ok for you to turn this error off.
- disable-aspectj-weaver-missing-error = false
-
- # Specify if entities that do not match any include/exclude filter should be tracked.
- track-unmatched-entities = yes
-
- filters {
- trace {
- includes = [ "**" ]
- excludes = [ ]
- }
- }
-
- # Default instrument settings for histograms, min max counters and gauges. The actual settings to be used when
 - # creating an instrument are determined by merging the default settings, code settings and specific instrument
 - # settings using the following priorities (top wins):
 -
 - #  - any setting in `kamon.metric.instrument-settings` for the given category/instrument.
 - #  - code settings provided when creating the instrument.
 - #  - `default-instrument-settings`.
 -
 - default-instrument-settings {
 - histogram {
 - precision = normal
 - lowest-discernible-value = 1
 - highest-trackable-value = 3600000000000
 - }
 -
 - min-max-counter {
 - precision = normal
 - lowest-discernible-value = 1
 - highest-trackable-value = 999999999
 - refresh-interval = 100 milliseconds
 - }
 -
 - gauge {
 - precision = normal
 - lowest-discernible-value = 1
 - highest-trackable-value = 3600000000000
 - refresh-interval = 100 milliseconds
 - }
 -
 - }
 -
 - # Custom configurations for category instruments. The settings provided in this section will override the default
 - # and code instrument settings as explained in the `default-instrument-settings` key. There is no need to provide
 - # full instrument settings in this section, only the settings that should be overridden must be included. Example:
 - # if you wish to change the precision and lowest discernible value of the `elapsed-time` instrument for the `trace`
 - # category, you should include the following configuration in your application.conf file:
 - #
 - # kamon.metric.instrument-settings.trace {
 - #   elapsed-time {
 - #     precision = fine
 - #     lowest-discernible-value = 1000
 - #   }
 - # }
 - #
 - # In this example, the value for the `highest-trackable-value` setting will be either the code setting or the default
 - # setting, depending on how the `elapsed-time` metric is created.
 - instrument-settings {
 -
 - }
 - }
 -
 -
 - trace {
 -
 - # Level of detail used when recording trace information. The possible values are:
 - #  - metrics-only: metrics for all included traces and all segments are recorded, but no Trace messages will be sent
 - #    to the subscribers of trace data.
 - #  - simple-trace: metrics for all included traces and all segments are recorded and, additionally, a Trace message
 - #    containing the trace and segments details and metadata is sent to all subscribers.
 - level-of-detail = metrics-only
 -
 - # Sampling strategy to apply when the tracing level is set to `simple-trace`. The options are: all, random, ordered,
 - # threshold and clock. The details of each sampler are below.
 - sampling = random
 -
 - # Use a ThreadLocalRandom to generate numbers between 1 and 100; if the random number is less than or equal to .chance
 - # then tracing information will be gathered and reported for the current trace.
 - random-sampler {
 - chance = 10
 - }
 -
 - # Use an AtomicLong to ensure that tracing information will be gathered and reported once every .sample-interval
 - # requests.
 - ordered-sampler {
 - # must be power of two
 - sample-interval = 8
 - }
 -
 - # Fully qualified name of the function that will be used for generating tokens for traces.
 - token-generator = kamon.trace.DefaultTokenGenerator
 -
 - # Gather tracing information for all traces but only report those whose elapsed-time is equal to or greater than the
 - # .minimum-elapsed-time setting.
 - threshold-sampler {
 - minimum-elapsed-time = 1 second
 - }
 -
 - # Use a FiniteDuration to only record a trace once every .pause interval.
 - clock-sampler {
 - pause = 1 second
 - }
 -
 - incubator {
 - # Minimum time to stay in the trace incubator before checking if the trace should not be incubated anymore. No
 - # checks are made at least until this period has passed.
 - min-incubation-time = 5 seconds
 -
 - # Time to wait between incubation checks. After min-incubation-time, a trace is checked using this interval and if
 - # it shouldn't be incubated anymore, the TraceInfo is collected and reported for it.
 - check-interval = 1 second
 -
 - # Max amount of time that a trace can be in the incubator. If this time is reached for a given trace then it will
 - # be reported with whatever information is available at the moment, logging a warning for each segment that remains
 - # open after this point.
 - max-incubation-time = 20 seconds
 - }
 - }
 -
 - # All settings included under the internal-config key will be used to replace the akka.* and spray.* settings. By
 - # doing this we avoid applying custom settings that might make sense for the user application to the internal actor
 - # system and Spray facilities used by Kamon.
 - internal-config {
 -
 - akka.actor {
 - provider = "local"
 - default-dispatcher {
 - fork-join-executor {
 - parallelism-min = 2
 - parallelism-factor = 2.0
 - parallelism-max = 10
 - }
 - }
 - }
 -
 - spray {
 -
 - }
 - }
 -
 - # Controls whether the AspectJ Weaver missing warning should be displayed if any Kamon module requiring AspectJ is
 - # found in the classpath but the application is started without the AspectJ Weaver.
 - show-aspectj-missing-warning = yes
 -
 - modules {
 - # Just a placeholder to ensure that the key is always available. Non-core Kamon modules should provide their
 - # settings in a module-info section.
 - }
 -
 - # Add tags to all reported metrics. Can be useful to identify the source of metrics from a particular JVM instance.
 - # Example:
 - #
 - # default-tags {
 - #   host: ${?HOSTNAME}
 - #   container-name: ${?CONTAINER_NAME}
 - # }
 - default-tags {
 -
 - }
-}
\ No newline at end of file
diff --git a/kamon-core/src/main/scala-2.10/kamon/ActorSystemTools.scala b/kamon-core/src/main/scala-2.10/kamon/ActorSystemTools.scala
deleted file mode 100644
index 01dd4234..00000000
--- a/kamon-core/src/main/scala-2.10/kamon/ActorSystemTools.scala
+++ /dev/null
@@ -1,25 +0,0 @@
-/* =========================================================================================
 - * Copyright © 2013-2016 the kamon project
 - *
 - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 - * except in compliance with the License. You may obtain a copy of the License at
 - *
 - * http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software distributed under the
 - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 - * either express or implied. See the License for the specific language governing permissions
 - * and limitations under the License.
- * ========================================================================================= - */ -package kamon - -import scala.util.control.NonFatal - -import akka.actor.ActorSystem - -object ActorSystemTools { - private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { - system.shutdown() - } -} diff --git a/kamon-core/src/main/scala-2.11/kamon/ActorSystemTools.scala b/kamon-core/src/main/scala-2.11/kamon/ActorSystemTools.scala deleted file mode 100644 index 01dd4234..00000000 --- a/kamon-core/src/main/scala-2.11/kamon/ActorSystemTools.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ -package kamon - -import scala.util.control.NonFatal - -import akka.actor.ActorSystem - -object ActorSystemTools { - private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { - system.shutdown() - } -} diff --git a/kamon-core/src/main/scala-2.12/kamon/ActorSystemTools.scala b/kamon-core/src/main/scala-2.12/kamon/ActorSystemTools.scala deleted file mode 100644 index 762201d5..00000000 --- a/kamon-core/src/main/scala-2.12/kamon/ActorSystemTools.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ -package kamon - -import akka.actor.ActorSystem - -import scala.util.control.NonFatal - -object ActorSystemTools { - private[kamon] def terminateActorSystem(system: ActorSystem): Unit = { - system.terminate() - } -} diff --git a/kamon-core/src/main/scala/kamon/Kamon.scala b/kamon-core/src/main/scala/kamon/Kamon.scala deleted file mode 100644 index c02d0505..00000000 --- a/kamon-core/src/main/scala/kamon/Kamon.scala +++ /dev/null @@ -1,115 +0,0 @@ -/* ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ -package kamon - -import _root_.akka.actor -import _root_.akka.actor._ -import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions, ConfigResolveOptions} -import kamon.metric._ -import kamon.trace.TracerModuleImpl -import kamon.util.logger.LazyLogger - -import _root_.scala.util.control.NonFatal -import _root_.scala.util.{Failure, Success, Try} - -object Kamon { - trait Extension extends actor.Extension - - @volatile private var kamonInstance = new Instance() - - def config = kamonInstance.config - def metrics = kamonInstance.metrics - def tracer = kamonInstance.tracer - - def start(): Unit = synchronized { - kamonInstance.start() - } - - def start(conf: Config): Unit = synchronized { - kamonInstance.start(conf) - } - - def shutdown(): Unit = synchronized { - kamonInstance.shutdown() - kamonInstance = new Instance() - } - - private class Instance() { - private val log = LazyLogger(classOf[Instance]) - private var actorSystem: ActorSystem = _ - - var started = false - var config: Config = defaultConfig - val metrics = MetricsModuleImpl(config) - val tracer = TracerModuleImpl(metrics, config) - - private lazy val _start = { - log.info("Initializing Kamon...") - tryLoadAutoweaveModule() - actorSystem = ActorSystem("kamon", config) - metrics.start(actorSystem, config) - tracer.start(actorSystem, config) - actorSystem.registerExtension(ModuleLoader) - started = true - } - - def start(): Unit = { - _start - } - - def start(conf: Config): Unit = { - config = patchConfiguration(conf) - _start - } - - def shutdown(): Unit = { - if (started) { - ActorSystemTools.terminateActorSystem(actorSystem) - } - } - - private def defaultConfig = { - patchConfiguration( - ConfigFactory.load( - Thread.currentThread().getContextClassLoader(), - ConfigParseOptions.defaults(), - ConfigResolveOptions.defaults().setAllowUnresolved(true) - ) - ) - } - - private def patchConfiguration(config: Config): Config = { - val internalConfig = config.getConfig("kamon.internal-config") - config - .withoutPath("akka") - .withoutPath("spray") - .withFallback(internalConfig) - } - - private def tryLoadAutoweaveModule(): Unit = { - Try { - val autoweave = Class.forName("kamon.autoweave.Autoweave") - autoweave.getDeclaredMethod("attach").invoke(autoweave.newInstance()) - } match { - case Success(_) ⇒ - val color = (msg: String) ⇒ s"""\u001B[32m${msg}\u001B[0m""" - log.info(color("Kamon-autoweave has been successfully loaded.")) - log.info(color("The AspectJ load time weaving agent is now attached to the JVM (you don't need to use -javaagent).")) - log.info(color("This offers extra flexibility but obviously any classes loaded before attachment will not be woven.")) - case Failure(NonFatal(reason)) ⇒ log.debug(s"Kamon-autoweave failed to load. 
Reason: ${reason.getMessage}.") - } - } - } -} diff --git a/kamon-core/src/main/scala/kamon/ModuleLoader.scala b/kamon-core/src/main/scala/kamon/ModuleLoader.scala deleted file mode 100644 index f1b5f414..00000000 --- a/kamon-core/src/main/scala/kamon/ModuleLoader.scala +++ /dev/null @@ -1,128 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon - -import _root_.akka.actor -import _root_.akka.actor._ -import kamon.util.logger.LazyLogger -import org.aspectj.lang.ProceedingJoinPoint -import org.aspectj.lang.annotation.{Around, Aspect, Pointcut} - -private[kamon] object ModuleLoader extends ExtensionId[ModuleLoaderExtension] with ExtensionIdProvider { - def lookup(): ExtensionId[_ <: actor.Extension] = ModuleLoader - def createExtension(system: ExtendedActorSystem): ModuleLoaderExtension = new ModuleLoaderExtension(system) -} - -private[kamon] class ModuleLoaderExtension(system: ExtendedActorSystem) extends Kamon.Extension { - val log = LazyLogger(getClass) - val settings = ModuleLoaderSettings(system) - - if (settings.modulesRequiringAspectJ.nonEmpty && !isAspectJPresent && settings.showAspectJMissingWarning) - logAspectJWeaverMissing(settings.modulesRequiringAspectJ) - - // Force initialization of all modules marked with auto-start. - settings.availableModules.filter(_.startInfo.nonEmpty).foreach { - case AvailableModuleInfo(name, requiresAJ, Some(ModuleStartInfo(autoStart, extensionClass))) if autoStart ⇒ - - system.dynamicAccess.getObjectFor[ExtensionId[Kamon.Extension]](extensionClass).map { moduleID ⇒ - log.debug(s"Auto starting the [$name] module.") - moduleID.get(system) - - } recover { - case th: Throwable ⇒ log.error(s"Failed to auto start the [$name] module.", th) - } - - case other ⇒ - - } - - // When AspectJ is present the kamon.supervisor.AspectJPresent aspect will make this return true. - def isAspectJPresent: Boolean = false - - def logAspectJWeaverMissing(modulesRequiringAspectJ: List[AvailableModuleInfo]): Unit = { - val moduleNames = modulesRequiringAspectJ.map(_.name).mkString(", ") - val weaverMissingMessage = - """ - | - | ___ _ ___ _ _ ___ ___ _ _ - | / _ \ | | |_ | | | | | | \/ |(_) (_) - |/ /_\ \ ___ _ __ ___ ___ | |_ | | | | | | ___ __ _ __ __ ___ _ __ | . . 
| _ ___ ___ _ _ __ __ _ - || _ |/ __|| '_ \ / _ \ / __|| __| | | | |/\| | / _ \ / _` |\ \ / // _ \| '__| | |\/| || |/ __|/ __|| || '_ \ / _` | - || | | |\__ \| |_) || __/| (__ | |_ /\__/ / \ /\ /| __/| (_| | \ V /| __/| | | | | || |\__ \\__ \| || | | || (_| | - |\_| |_/|___/| .__/ \___| \___| \__|\____/ \/ \/ \___| \__,_| \_/ \___||_| \_| |_/|_||___/|___/|_||_| |_| \__, | - | | | __/ | - | |_| |___/ - | - | It seems like your application was not started with the -javaagent:/path-to-aspectj-weaver.jar option but Kamon detected - | the following modules which require AspectJ to work properly: - | - """.stripMargin + moduleNames + - """ - | - | If you need help on setting up the aspectj weaver go to http://kamon.io/introduction/get-started/ for more info. On the - | other hand, if you are sure that you do not need or do not want to use the weaver then you can disable this error message - | by changing the kamon.show-aspectj-missing-warning setting in your configuration file. - | - """.stripMargin - - log.error(weaverMissingMessage) - } -} - -private[kamon] case class AvailableModuleInfo(name: String, requiresAspectJ: Boolean, startInfo: Option[ModuleStartInfo]) -private[kamon] case class ModuleStartInfo(autoStart: Boolean, extensionClass: String) -private[kamon] case class ModuleLoaderSettings(showAspectJMissingWarning: Boolean, availableModules: List[AvailableModuleInfo]) { - val modulesRequiringAspectJ = availableModules.filter(_.requiresAspectJ) -} - -private[kamon] object ModuleLoaderSettings { - - def apply(system: ActorSystem): ModuleLoaderSettings = { - import kamon.util.ConfigTools.Syntax - - val config = system.settings.config.getConfig("kamon.modules") - val showAspectJMissingWarning = system.settings.config.getBoolean("kamon.show-aspectj-missing-warning") - - val modules = config.firstLevelKeys - val availableModules = modules.map { moduleName ⇒ - val moduleConfig = config.getConfig(moduleName) - val requiresAspectJ = moduleConfig.getBoolean("requires-aspectj") - - val startInfo = - if (moduleConfig.hasPath("auto-start") && moduleConfig.hasPath("extension-class")) - Some(ModuleStartInfo(moduleConfig.getBoolean("auto-start"), moduleConfig.getString("extension-class"))) - else - None - - AvailableModuleInfo(moduleName, requiresAspectJ, startInfo) - - } toList - - ModuleLoaderSettings(showAspectJMissingWarning, availableModules) - } -} - -@Aspect -private[kamon] class AspectJPresent { - - @Pointcut("execution(* kamon.ModuleLoaderExtension.isAspectJPresent())") - def isAspectJPresentAtModuleSupervisor(): Unit = {} - - @Around("isAspectJPresentAtModuleSupervisor()") - def aroundIsAspectJPresentAtModuleSupervisor(pjp: ProceedingJoinPoint): Boolean = true - -} diff --git a/kamon-core/src/main/scala/kamon/metric/Entity.scala b/kamon-core/src/main/scala/kamon/metric/Entity.scala deleted file mode 100644 index 91249af0..00000000 --- a/kamon-core/src/main/scala/kamon/metric/Entity.scala +++ /dev/null @@ -1,37 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
You may obtain a copy of the License at
 - *
 - * http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software distributed under the
 - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 - * either express or implied. See the License for the specific language governing permissions
 - * and limitations under the License.
 - * =========================================================================================
 - */
 -
-package kamon.metric
-
-/**
 - * Identifies a `thing` that is being monitored by Kamon. A [[kamon.metric.Entity]] is used to identify tracked `things`
 - * on both the metrics recording and reporting sides. Only the name and category fields are used when determining
 - * equality between two entities.
 - *
 - * // TODO: Find a better word for `thing`.
 - */
-case class Entity(name: String, category: String, tags: Map[String, String])
-
-object Entity {
 - def apply(name: String, category: String): Entity =
 -  apply(name, category, Map.empty)
 -
 - def create(name: String, category: String): Entity =
 -  apply(name, category, Map.empty)
 -
 - def create(name: String, category: String, tags: Map[String, String]): Entity =
 -  new Entity(name, category, tags)
-}
\ No newline at end of file
diff --git a/kamon-core/src/main/scala/kamon/metric/EntityRecorder.scala b/kamon-core/src/main/scala/kamon/metric/EntityRecorder.scala
deleted file mode 100644
index e3b136dd..00000000
--- a/kamon-core/src/main/scala/kamon/metric/EntityRecorder.scala
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
 - * =========================================================================================
 - * Copyright © 2013-2015 the kamon project
 - *
 - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 - * except in compliance with the License. You may obtain a copy of the License at
 - *
 - * http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing, software distributed under the
 - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 - * either express or implied. See the License for the specific language governing permissions
 - * and limitations under the License.
- * ========================================================================================= - */ - -package kamon.metric - -import kamon.metric.instrument.Gauge.CurrentValueCollector -import kamon.metric.instrument.Histogram.DynamicRange -import kamon.metric.instrument._ -import kamon.util.Function - -import scala.collection.concurrent.TrieMap -import scala.concurrent.duration.FiniteDuration - -trait EntityRecorder { - def collect(collectionContext: CollectionContext): EntitySnapshot - def cleanup: Unit -} - -trait EntityRecorderFactory[T <: EntityRecorder] { - def category: String - def createRecorder(instrumentFactory: InstrumentFactory): T -} - -abstract class EntityRecorderFactoryCompanion[T <: EntityRecorder](val category: String, builder: (InstrumentFactory) ⇒ T) - extends EntityRecorderFactory[T] { - - def createRecorder(instrumentFactory: InstrumentFactory): T = builder(instrumentFactory) -} - -object EntityRecorderFactory { - def apply[T <: EntityRecorder](entityCategory: String, factory: InstrumentFactory ⇒ T): EntityRecorderFactory[T] = - new EntityRecorderFactory[T] { - def category: String = entityCategory - def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory) - } - - def create[T <: EntityRecorder](entityCategory: String, factory: Function[InstrumentFactory, T]): EntityRecorderFactory[T] = - new EntityRecorderFactory[T] { - def category: String = entityCategory - def createRecorder(instrumentFactory: InstrumentFactory): T = factory(instrumentFactory) - } -} - -private[kamon] sealed trait SingleInstrumentEntityRecorder extends EntityRecorder { - def key: MetricKey - def instrument: Instrument - - def collect(collectionContext: CollectionContext): EntitySnapshot = - new DefaultEntitySnapshot(Map(key → instrument.collect(collectionContext))) - - def cleanup: Unit = instrument.cleanup -} - -object SingleInstrumentEntityRecorder { - val Histogram = "histogram" - val MinMaxCounter = "min-max-counter" - val Gauge = "gauge" - val Counter = "counter" - - val AllCategories = List("histogram", "gauge", "counter", "min-max-counter") -} - -/** - * Entity recorder for a single Counter instrument. - */ -case class CounterRecorder(key: MetricKey, instrument: Counter) extends SingleInstrumentEntityRecorder - -/** - * Entity recorder for a single Histogram instrument. - */ -case class HistogramRecorder(key: MetricKey, instrument: Histogram) extends SingleInstrumentEntityRecorder - -/** - * Entity recorder for a single MinMaxCounter instrument. - */ -case class MinMaxCounterRecorder(key: MetricKey, instrument: MinMaxCounter) extends SingleInstrumentEntityRecorder - -/** - * Entity recorder for a single Gauge instrument. - */ -case class GaugeRecorder(key: MetricKey, instrument: Gauge) extends SingleInstrumentEntityRecorder - -/** - * Base class with plenty of utility methods to facilitate the creation of [[EntityRecorder]] implementations. - * It is not required to use this base class for defining a custom [[EntityRecorder]], but it is certainly - * the most convenient way to do it and the preferred approach throughout the Kamon codebase. 
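 - *
 - * A hypothetical subclass might look like this (an editor's sketch, not part of the original
 - * sources; it assumes the `counter` and `histogram` helpers defined below and that
 - * `Time.Nanoseconds` is available from `kamon.metric.instrument`):
 - *
 - * {{{
 - * class MyServiceMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) {
 - *   val requests = counter("requests")
 - *   val processingTime = histogram("processing-time", Time.Nanoseconds)
 - * }
 - * }}}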
- */ -abstract class GenericEntityRecorder(instrumentFactory: InstrumentFactory) extends EntityRecorder { - import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax - - private val _instruments = TrieMap.empty[MetricKey, Instrument] - private def register[T <: Instrument](key: MetricKey, instrument: ⇒ T): T = - _instruments.atomicGetOrElseUpdate(key, instrument, _.cleanup).asInstanceOf[T] - - protected def histogram(name: String): Histogram = - register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name)) - - protected def histogram(name: String, dynamicRange: DynamicRange): Histogram = - register(HistogramKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createHistogram(name, Some(dynamicRange))) - - protected def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram = - register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name)) - - protected def histogram(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): Histogram = - register(HistogramKey(name, unitOfMeasurement), instrumentFactory.createHistogram(name, Some(dynamicRange))) - - protected def removeHistogram(name: String): Unit = - _instruments.remove(HistogramKey(name, UnitOfMeasurement.Unknown)) - - protected def removeHistogram(name: String, unitOfMeasurement: UnitOfMeasurement): Unit = - _instruments.remove(HistogramKey(name, unitOfMeasurement)) - - protected def minMaxCounter(name: String): MinMaxCounter = - register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name)) - - protected def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter = - register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange))) - - protected def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter = - register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval))) - - protected def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name)) - - protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = - register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval))) - - protected def minMaxCounter(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange))) - - protected def minMaxCounter(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - register(MinMaxCounterKey(name, unitOfMeasurement), instrumentFactory.createMinMaxCounter(name, refreshInterval = Some(refreshInterval))) - - protected def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - register(MinMaxCounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createMinMaxCounter(name, Some(dynamicRange), Some(refreshInterval))) - - protected def minMaxCounter(key: MinMaxCounterKey): MinMaxCounter = - register(key, instrumentFactory.createMinMaxCounter(key.name)) - - protected def minMaxCounter(key: 
MinMaxCounterKey, dynamicRange: DynamicRange): MinMaxCounter = - register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange))) - - protected def minMaxCounter(key: MinMaxCounterKey, refreshInterval: FiniteDuration): MinMaxCounter = - register(key, instrumentFactory.createMinMaxCounter(key.name, refreshInterval = Some(refreshInterval))) - - protected def minMaxCounter(key: MinMaxCounterKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = - register(key, instrumentFactory.createMinMaxCounter(key.name, Some(dynamicRange), Some(refreshInterval))) - - protected def removeMinMaxCounter(name: String): Unit = - _instruments.remove(MinMaxCounterKey(name, UnitOfMeasurement.Unknown)) - - protected def removeMinMaxCounter(key: MinMaxCounterKey): Unit = - _instruments.remove(key) - - protected def gauge(name: String, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, valueCollector = valueCollector)) - - protected def gauge(name: String, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector)) - - protected def gauge(name: String, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) - - protected def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, valueCollector = valueCollector)) - - protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector)) - - protected def gauge(name: String, dynamicRange: DynamicRange, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), valueCollector = valueCollector)) - - protected def gauge(name: String, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createGauge(name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) - - protected def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, unitOfMeasurement: UnitOfMeasurement, valueCollector: CurrentValueCollector): Gauge = - register(GaugeKey(name, unitOfMeasurement), instrumentFactory.createGauge(name, Some(dynamicRange), Some(refreshInterval), valueCollector)) - - protected def gauge(key: GaugeKey, valueCollector: CurrentValueCollector): Gauge = - register(key, instrumentFactory.createGauge(key.name, valueCollector = valueCollector)) - - protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, valueCollector: CurrentValueCollector): Gauge = - register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), valueCollector = valueCollector)) - - protected def gauge(key: GaugeKey, refreshInterval: FiniteDuration, 
valueCollector: CurrentValueCollector): Gauge = - register(key, instrumentFactory.createGauge(key.name, refreshInterval = Some(refreshInterval), valueCollector = valueCollector)) - - protected def gauge(key: GaugeKey, dynamicRange: DynamicRange, refreshInterval: FiniteDuration, valueCollector: CurrentValueCollector): Gauge = - register(key, instrumentFactory.createGauge(key.name, Some(dynamicRange), Some(refreshInterval), valueCollector = valueCollector)) - - protected def removeGauge(name: String): Unit = - _instruments.remove(GaugeKey(name, UnitOfMeasurement.Unknown)) - - protected def removeGauge(key: GaugeKey): Unit = - _instruments.remove(key) - - protected def counter(name: String): Counter = - register(CounterKey(name, UnitOfMeasurement.Unknown), instrumentFactory.createCounter()) - - protected def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter = - register(CounterKey(name, unitOfMeasurement), instrumentFactory.createCounter()) - - protected def counter(key: CounterKey): Counter = - register(key, instrumentFactory.createCounter()) - - protected def removeCounter(name: String): Unit = - _instruments.remove(CounterKey(name, UnitOfMeasurement.Unknown)) - - protected def removeCounter(key: CounterKey): Unit = - _instruments.remove(key) - - def collect(collectionContext: CollectionContext): EntitySnapshot = { - val snapshots = Map.newBuilder[MetricKey, InstrumentSnapshot] - _instruments.foreach { - case (key, instrument) ⇒ snapshots += key → instrument.collect(collectionContext) - } - - new DefaultEntitySnapshot(snapshots.result()) - } - - def cleanup: Unit = _instruments.values.foreach(_.cleanup) -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/EntitySnapshot.scala b/kamon-core/src/main/scala/kamon/metric/EntitySnapshot.scala deleted file mode 100644 index 16edecd8..00000000 --- a/kamon-core/src/main/scala/kamon/metric/EntitySnapshot.scala +++ /dev/null @@ -1,63 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric - -import kamon.metric.instrument.{Counter, Histogram, CollectionContext, InstrumentSnapshot} -import kamon.util.MapMerge -import scala.reflect.ClassTag - -trait EntitySnapshot { - def metrics: Map[MetricKey, InstrumentSnapshot] - def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot - - def histogram(name: String): Option[Histogram.Snapshot] = - find[HistogramKey, Histogram.Snapshot](name) - - def minMaxCounter(name: String): Option[Histogram.Snapshot] = - find[MinMaxCounterKey, Histogram.Snapshot](name) - - def gauge(name: String): Option[Histogram.Snapshot] = - find[GaugeKey, Histogram.Snapshot](name) - - def counter(name: String): Option[Counter.Snapshot] = - find[CounterKey, Counter.Snapshot](name) - - def histograms: Map[HistogramKey, Histogram.Snapshot] = - filterByType[HistogramKey, Histogram.Snapshot] - - def minMaxCounters: Map[MinMaxCounterKey, Histogram.Snapshot] = - filterByType[MinMaxCounterKey, Histogram.Snapshot] - - def gauges: Map[GaugeKey, Histogram.Snapshot] = - filterByType[GaugeKey, Histogram.Snapshot] - - def counters: Map[CounterKey, Counter.Snapshot] = - filterByType[CounterKey, Counter.Snapshot] - - private def filterByType[K <: MetricKey, V <: InstrumentSnapshot](implicit keyCT: ClassTag[K]): Map[K, V] = - metrics.collect { case (k, v) if keyCT.runtimeClass.isInstance(k) ⇒ (k.asInstanceOf[K], v.asInstanceOf[V]) } - - private def find[K <: MetricKey, V <: InstrumentSnapshot](name: String)(implicit keyCT: ClassTag[K]) = - metrics.find { case (k, v) ⇒ keyCT.runtimeClass.isInstance(k) && k.name == name } map (_._2.asInstanceOf[V]) -} - -class DefaultEntitySnapshot(val metrics: Map[MetricKey, InstrumentSnapshot]) extends EntitySnapshot { - import MapMerge.Syntax - - override def merge(that: EntitySnapshot, collectionContext: CollectionContext): EntitySnapshot = - new DefaultEntitySnapshot(metrics.merge(that.metrics, (l, r) ⇒ l.merge(r, collectionContext))) -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/MetricKey.scala b/kamon-core/src/main/scala/kamon/metric/MetricKey.scala deleted file mode 100644 index 0d4e0163..00000000 --- a/kamon-core/src/main/scala/kamon/metric/MetricKey.scala +++ /dev/null @@ -1,47 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import kamon.metric.instrument.UnitOfMeasurement - -/** - * MetricKeys are used to identify a given metric in entity recorders and snapshots. - */ -sealed trait MetricKey { - def name: String - def unitOfMeasurement: UnitOfMeasurement -} - -/** - * MetricKey for all Histogram-based metrics. 
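 - * As with the other keys below, the unit of measurement participates in equality, so the same
 - * name recorded with two different units yields two distinct keys (editor's note).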
- */ -private[kamon] case class HistogramKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey - -/** - * MetricKey for all MinMaxCounter-based metrics. - */ -private[kamon] case class MinMaxCounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey - -/** - * MetricKey for all Gauge-based metrics. - */ -private[kamon] case class GaugeKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey - -/** - * MetricKey for all Counter-based metrics. - */ -private[kamon] case class CounterKey(name: String, unitOfMeasurement: UnitOfMeasurement) extends MetricKey diff --git a/kamon-core/src/main/scala/kamon/metric/MetricScaleDecorator.scala b/kamon-core/src/main/scala/kamon/metric/MetricScaleDecorator.scala deleted file mode 100644 index 06de65ef..00000000 --- a/kamon-core/src/main/scala/kamon/metric/MetricScaleDecorator.scala +++ /dev/null @@ -1,57 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import akka.actor.{Actor, ActorRef, Props} -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot -import kamon.metric.instrument._ - -/** - * Can be used as a decorator to scale TickMetricSnapshot messages to the given `timeUnits` and/or `memoryUnits` - * before forwarding them to the original receiver. - * @param timeUnits Optional time units to scale time metrics to - * @param memoryUnits Optional memory units to scale memory metrics to - * @param receiver Receiver of the scaled metrics snapshot, usually a backend sender - */ -class MetricScaleDecorator(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef) extends Actor { - require( - timeUnits.isDefined || memoryUnits.isDefined, - "Use MetricScaleDecorator only when at least one of the units is defined" - ) - - override def receive: Receive = { - case tick: TickMetricSnapshot ⇒ - val scaled = tick.copy(metrics = tick.metrics.mapValues { entitySnapshot ⇒ - new DefaultEntitySnapshot(entitySnapshot.metrics.map { - case (metricKey, metricSnapshot) ⇒ - val scaledSnapshot = (metricKey.unitOfMeasurement, timeUnits, memoryUnits) match { - case (time: Time, Some(to), _) ⇒ metricSnapshot.scale(time, to) - case (memory: Memory, _, Some(to)) ⇒ metricSnapshot.scale(memory, to) - case _ ⇒ metricSnapshot - } - metricKey → scaledSnapshot - }) - }) - receiver forward scaled - } -} - -object MetricScaleDecorator { - def props(timeUnits: Option[Time], memoryUnits: Option[Memory], receiver: ActorRef): Props = - Props(new MetricScaleDecorator(timeUnits, memoryUnits, receiver)) -} - diff --git a/kamon-core/src/main/scala/kamon/metric/MetricsModule.scala b/kamon-core/src/main/scala/kamon/metric/MetricsModule.scala deleted file mode 100755 index 7c85bb02..00000000 --- a/kamon-core/src/main/scala/kamon/metric/MetricsModule.scala +++ /dev/null @@ -1,394
+0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import java.util.Map.Entry - -import akka.actor._ -import com.typesafe.config.{Config, ConfigValue, ConfigValueType} -import kamon.metric.SubscriptionsDispatcher.{Subscribe, Unsubscribe} -import kamon.metric.instrument.Gauge.CurrentValueCollector -import kamon.metric.instrument.Histogram.DynamicRange -import kamon.metric.instrument._ -import kamon.util.LazyActorRef - -import scala.collection.JavaConverters._ -import scala.collection.concurrent.TrieMap -import scala.concurrent.duration.FiniteDuration - -case class EntityRegistration[T <: EntityRecorder](entity: Entity, recorder: T) - -trait MetricsModule { - def settings: MetricsSettings - - def shouldTrack(entity: Entity): Boolean - - def shouldTrack(entityName: String, category: String): Boolean = - shouldTrack(Entity(entityName, category)) - - // - // Histograms registration and removal - // - - def histogram(name: String): Histogram = - registerHistogram(name) - - def histogram(name: String, unitOfMeasurement: UnitOfMeasurement): Histogram = - registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement)) - - def histogram(name: String, dynamicRange: DynamicRange): Histogram = - registerHistogram(name, dynamicRange = Some(dynamicRange)) - - def histogram(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram = - registerHistogram(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) - - def histogram(name: String, tags: Map[String, String]): Histogram = - registerHistogram(name, tags) - - def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Histogram = - registerHistogram(name, tags, Some(unitOfMeasurement)) - - def histogram(name: String, tags: Map[String, String], dynamicRange: DynamicRange): Histogram = - registerHistogram(name, tags, dynamicRange = Some(dynamicRange)) - - def histogram(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): Histogram = - registerHistogram(name, tags, Some(unitOfMeasurement), Some(dynamicRange)) - - def removeHistogram(name: String): Boolean = - removeHistogram(name, Map.empty) - - def registerHistogram(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, - dynamicRange: Option[DynamicRange] = None): Histogram - - def removeHistogram(name: String, tags: Map[String, String]): Boolean - - // - // MinMaxCounter registration and removal - // - - def minMaxCounter(name: String): MinMaxCounter = - registerMinMaxCounter(name) - - def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - registerMinMaxCounter(name, unitOfMeasurement = 
Some(unitOfMeasurement)) - - def minMaxCounter(name: String, dynamicRange: DynamicRange): MinMaxCounter = - registerMinMaxCounter(name, dynamicRange = Some(dynamicRange)) - - def minMaxCounter(name: String, refreshInterval: FiniteDuration): MinMaxCounter = - registerMinMaxCounter(name, refreshInterval = Some(refreshInterval)) - - def minMaxCounter(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration): MinMaxCounter = - registerMinMaxCounter(name, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval)) - - def minMaxCounter(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter = - registerMinMaxCounter(name, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) - - def minMaxCounter(name: String, tags: Map[String, String]): MinMaxCounter = - registerMinMaxCounter(name, tags) - - def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): MinMaxCounter = - registerMinMaxCounter(name, tags, Some(unitOfMeasurement)) - - def minMaxCounter(name: String, tags: Map[String, String], dynamicRange: DynamicRange): MinMaxCounter = - registerMinMaxCounter(name, tags, dynamicRange = Some(dynamicRange)) - - def minMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange): MinMaxCounter = - registerMinMaxCounter(name, tags, Some(unitOfMeasurement), Some(dynamicRange)) - - def removeMinMaxCounter(name: String): Boolean = - removeMinMaxCounter(name, Map.empty) - - def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean - - def registerMinMaxCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, - dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter - - // - // Gauge registration and removal - // - - def gauge(name: String)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector) - - def gauge(name: String, unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement)) - - def gauge(name: String, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange)) - - def gauge(name: String, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, refreshInterval = Some(refreshInterval)) - - def gauge(name: String, dynamicRange: DynamicRange, refreshInterval: FiniteDuration)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, dynamicRange = Some(dynamicRange), refreshInterval = Some(refreshInterval)) - - def gauge(name: String, unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, unitOfMeasurement = Some(unitOfMeasurement), dynamicRange = Some(dynamicRange)) - - def gauge(name: String, tags: Map[String, String])(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, tags) - - def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, tags, Some(unitOfMeasurement)) - - def gauge(name: String, tags: Map[String, String], dynamicRange: 
DynamicRange)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, tags, dynamicRange = Some(dynamicRange)) - - def gauge(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement, dynamicRange: DynamicRange)(valueCollector: CurrentValueCollector): Gauge = - registerGauge(name, valueCollector, tags, Some(unitOfMeasurement), Some(dynamicRange)) - - def removeGauge(name: String): Boolean = - removeGauge(name, Map.empty) - - def removeGauge(name: String, tags: Map[String, String]): Boolean - - def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty, - unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None, - refreshInterval: Option[FiniteDuration] = None): Gauge - - // - // Counters registration and removal - // - - def counter(name: String): Counter = - registerCounter(name) - - def counter(name: String, unitOfMeasurement: UnitOfMeasurement): Counter = - registerCounter(name, unitOfMeasurement = Some(unitOfMeasurement)) - - def counter(name: String, tags: Map[String, String]): Counter = - registerCounter(name, tags) - - def counter(name: String, tags: Map[String, String], unitOfMeasurement: UnitOfMeasurement): Counter = - registerCounter(name, tags, Some(unitOfMeasurement)) - - def removeCounter(name: String): Boolean = - removeCounter(name, Map.empty) - - def removeCounter(name: String, tags: Map[String, String]): Boolean - - def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, - dynamicRange: Option[DynamicRange] = None): Counter - - // - // Entities registration and removal - // - - def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String): T = - entity(recorderFactory, Entity(name, recorderFactory.category)) - - def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], name: String, tags: Map[String, String]): T = - entity(recorderFactory, Entity(name, recorderFactory.category, tags)) - - def removeEntity(name: String, category: String): Boolean = - removeEntity(Entity(name, category, Map.empty)) - - def removeEntity(name: String, category: String, tags: Map[String, String]): Boolean = - removeEntity(Entity(name, category, tags)) - - def removeEntity(entity: Entity): Boolean - - def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T - - def find(name: String, category: String): Option[EntityRecorder] = - find(Entity(name, category)) - - def find(name: String, category: String, tags: Map[String, String]): Option[EntityRecorder] = - find(Entity(name, category, tags)) - - def find(entity: Entity): Option[EntityRecorder] - - def subscribe(filter: SubscriptionFilter, subscriber: ActorRef): Unit = - subscribe(filter, subscriber, permanently = true) - - def subscribe(category: String, selection: String, subscriber: ActorRef, permanently: Boolean): Unit = - subscribe(SubscriptionFilter(category, selection), subscriber, permanently) - - def subscribe(category: String, selection: String, subscriber: ActorRef): Unit = - subscribe(SubscriptionFilter(category, selection), subscriber, permanently = true) - - def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean): Unit - - def unsubscribe(subscriber: ActorRef): Unit - - def buildDefaultCollectionContext: CollectionContext - - def instrumentFactory(category: String): InstrumentFactory -} - -private[kamon] class 
MetricsModuleImpl(config: Config) extends MetricsModule { - import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax - - private val _trackedEntities = TrieMap.empty[Entity, EntityRecorder] - private val _subscriptions = new LazyActorRef - - @volatile var settings = MetricsSettings(config) - - val defaultTags: Map[String, String] = if (config.hasPath("kamon.default-tags")) { - config.getConfig("kamon.default-tags").resolve().entrySet().asScala - .collect { - case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.STRING => - (e.getKey, e.getValue.unwrapped().asInstanceOf[String]) - case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.NUMBER => - (e.getKey, e.getValue.unwrapped().asInstanceOf[Int].toString) - case e: Entry[String, ConfigValue] if e.getValue.valueType() == ConfigValueType.BOOLEAN => - (e.getKey, e.getValue.unwrapped().asInstanceOf[Boolean].toString) - }.toMap - } - else { - Map.empty - } - - def shouldTrack(entity: Entity): Boolean = - settings.entityFilters.get(entity.category).map { - filter ⇒ filter.accept(entity.name) - - } getOrElse (settings.trackUnmatchedEntities) - - def registerHistogram(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement], - dynamicRange: Option[DynamicRange]): Histogram = { - - val histogramEntity = Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags) - val recorder = _trackedEntities.atomicGetOrElseUpdate(histogramEntity, { - val factory = instrumentFactory(histogramEntity.category) - HistogramRecorder( - HistogramKey(histogramEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), - factory.createHistogram(name, dynamicRange) - ) - }, _.cleanup) - - recorder.asInstanceOf[HistogramRecorder].instrument - } - - def removeHistogram(name: String, tags: Map[String, String]): Boolean = - _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Histogram, tags ++ defaultTags)).isDefined - - def registerMinMaxCounter(name: String, tags: Map[String, String], unitOfMeasurement: Option[UnitOfMeasurement], dynamicRange: Option[DynamicRange], - refreshInterval: Option[FiniteDuration]): MinMaxCounter = { - - val minMaxCounterEntity = Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags) - val recorder = _trackedEntities.atomicGetOrElseUpdate(minMaxCounterEntity, { - val factory = instrumentFactory(minMaxCounterEntity.category) - MinMaxCounterRecorder( - MinMaxCounterKey(minMaxCounterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), - factory.createMinMaxCounter(name, dynamicRange, refreshInterval) - ) - }, _.cleanup) - - recorder.asInstanceOf[MinMaxCounterRecorder].instrument - } - - def removeMinMaxCounter(name: String, tags: Map[String, String]): Boolean = - _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.MinMaxCounter, tags ++ defaultTags)).isDefined - - def registerGauge(name: String, valueCollector: CurrentValueCollector, tags: Map[String, String] = Map.empty, - unitOfMeasurement: Option[UnitOfMeasurement] = None, dynamicRange: Option[DynamicRange] = None, - refreshInterval: Option[FiniteDuration] = None): Gauge = { - - val gaugeEntity = Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags) - val recorder = _trackedEntities.atomicGetOrElseUpdate(gaugeEntity, { - val factory = instrumentFactory(gaugeEntity.category) - GaugeRecorder( - GaugeKey(gaugeEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), - 
factory.createGauge(name, dynamicRange, refreshInterval, valueCollector) - ) - }, _.cleanup) - - recorder.asInstanceOf[GaugeRecorder].instrument - } - - def removeGauge(name: String, tags: Map[String, String]): Boolean = - _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Gauge, tags ++ defaultTags)).isDefined - - def registerCounter(name: String, tags: Map[String, String] = Map.empty, unitOfMeasurement: Option[UnitOfMeasurement] = None, - dynamicRange: Option[DynamicRange] = None): Counter = { - - val counterEntity = Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags) - val recorder = _trackedEntities.atomicGetOrElseUpdate(counterEntity, { - val factory = instrumentFactory(counterEntity.category) - CounterRecorder( - CounterKey(counterEntity.category, unitOfMeasurement.getOrElse(UnitOfMeasurement.Unknown)), - factory.createCounter() - ) - }, _.cleanup) - - recorder.asInstanceOf[CounterRecorder].instrument - } - - def removeCounter(name: String, tags: Map[String, String]): Boolean = - _trackedEntities.remove(Entity(name, SingleInstrumentEntityRecorder.Counter, tags ++ defaultTags)).isDefined - - def entity[T <: EntityRecorder](recorderFactory: EntityRecorderFactory[T], entity: Entity): T = { - _trackedEntities.atomicGetOrElseUpdate(entity.copy(tags = entity.tags ++ defaultTags), { - recorderFactory.createRecorder(instrumentFactory(recorderFactory.category)) - }, _.cleanup).asInstanceOf[T] - } - - def removeEntity(entity: Entity): Boolean = { - val removedEntity = _trackedEntities.remove(entity.copy(tags = entity.tags ++ defaultTags)) - removedEntity.foreach(_.cleanup) - removedEntity.isDefined - } - - def find(entity: Entity): Option[EntityRecorder] = - _trackedEntities.get(entity.copy(tags = entity.tags ++ defaultTags)) - - def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit = - _subscriptions.tell(Subscribe(filter, subscriber, permanent)) - - def unsubscribe(subscriber: ActorRef): Unit = - _subscriptions.tell(Unsubscribe(subscriber)) - - def buildDefaultCollectionContext: CollectionContext = - CollectionContext(settings.defaultCollectionContextBufferSize) - - def instrumentFactory(category: String): InstrumentFactory = - settings.instrumentFactories.getOrElse(category, settings.defaultInstrumentFactory) - - private[kamon] def collectSnapshots(collectionContext: CollectionContext): Map[Entity, EntitySnapshot] = { - val builder = Map.newBuilder[Entity, EntitySnapshot] - _trackedEntities.foreach { - case (identity, recorder) ⇒ builder += ((identity, recorder.collect(collectionContext))) - } - - builder.result() - } - - /** - * Metrics Extension initialization. 
- */ - private var _system: ActorSystem = null - private lazy val _start = { - _subscriptions.point(_system.actorOf(SubscriptionsDispatcher.props(settings.tickInterval, this), "metrics")) - settings.pointScheduler(DefaultRefreshScheduler(_system.scheduler, _system.dispatcher)) - } - - def start(system: ActorSystem, newConfig: Config): Unit = synchronized { - settings = MetricsSettings(newConfig) - _system = system - _start - _system = null - } -} - -private[kamon] object MetricsModuleImpl { - - def apply(config: Config) = - new MetricsModuleImpl(config) -} - diff --git a/kamon-core/src/main/scala/kamon/metric/MetricsSettings.scala b/kamon-core/src/main/scala/kamon/metric/MetricsSettings.scala deleted file mode 100644 index 592e8f67..00000000 --- a/kamon-core/src/main/scala/kamon/metric/MetricsSettings.scala +++ /dev/null @@ -1,123 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import com.typesafe.config.Config -import kamon.metric.instrument._ -import kamon.util.PathFilter -import kamon.util.GlobPathFilter -import kamon.util.RegexPathFilter - -import scala.concurrent.duration.FiniteDuration - -/** - * Configuration settings for the Metrics extension, as read from the `kamon.metric` configuration key. 
- */ -case class MetricsSettings( - tickInterval: FiniteDuration, - defaultCollectionContextBufferSize: Int, - trackUnmatchedEntities: Boolean, - entityFilters: Map[String, EntityFilter], - instrumentFactories: Map[String, InstrumentFactory], - defaultInstrumentFactory: InstrumentFactory, - refreshScheduler: RefreshScheduler -) { - - private[kamon] def pointScheduler(targetScheduler: RefreshScheduler): Unit = refreshScheduler match { - case lrs: LazyRefreshScheduler ⇒ lrs.point(targetScheduler) - case others ⇒ - } -} - -/** - * Filter that decides whether an entity name should be tracked: a name is accepted when it matches at least one - * of the `includes` filters and none of the `excludes` filters. - */ -case class EntityFilter(includes: List[PathFilter], excludes: List[PathFilter]) { - def accept(name: String): Boolean = - includes.exists(_.accept(name)) && !excludes.exists(_.accept(name)) -} - -object MetricsSettings { - import kamon.util.ConfigTools.Syntax - - def apply(config: Config): MetricsSettings = { - val metricConfig = config.getConfig("kamon.metric") - - val tickInterval = metricConfig.getFiniteDuration("tick-interval") - val collectBufferSize = metricConfig.getInt("default-collection-context-buffer-size") - val trackUnmatchedEntities = metricConfig.getBoolean("track-unmatched-entities") - val entityFilters = loadFilters(metricConfig.getConfig("filters")) - val defaultInstrumentSettings = DefaultInstrumentSettings.fromConfig(metricConfig.getConfig("default-instrument-settings")) - - val refreshScheduler = new LazyRefreshScheduler - val instrumentFactories = loadInstrumentFactories(metricConfig.getConfig("instrument-settings"), defaultInstrumentSettings, refreshScheduler) - val defaultInstrumentFactory = new InstrumentFactory(Map.empty, defaultInstrumentSettings, refreshScheduler) - - MetricsSettings(tickInterval, collectBufferSize, trackUnmatchedEntities, entityFilters, instrumentFactories, - defaultInstrumentFactory, refreshScheduler) - } - - /** - * Load all the default filters configured under the `kamon.metric.filters` configuration key. Each filter is - * defined with the entity category as a sub-key of the `kamon.metric.filters` key, with two sub-keys: includes - * and excludes, each holding a list of string glob or regex patterns ('asRegex' defaults to false). Example: - * - * {{{ - * - * kamon.metric.filters { - * actor { - * includes = ["user/test-actor", "user/service/worker-*"] - * excludes = ["user/IO-*"] - * asRegex = false - * } - * } - * - * }}} - * - * @return a Map from category name to corresponding entity filter. - */ - def loadFilters(filtersConfig: Config): Map[String, EntityFilter] = { - import scala.collection.JavaConverters._ - - filtersConfig.firstLevelKeys map { category: String ⇒ - val asRegex = if (filtersConfig.hasPath(s"$category.asRegex")) filtersConfig.getBoolean(s"$category.asRegex") else false - val includes = filtersConfig.getStringList(s"$category.includes").asScala.map(inc ⇒ - if (asRegex) RegexPathFilter(inc) else new GlobPathFilter(inc)).toList - val excludes = filtersConfig.getStringList(s"$category.excludes").asScala.map(exc ⇒ - if (asRegex) RegexPathFilter(exc) else new GlobPathFilter(exc)).toList - - (category, EntityFilter(includes, excludes)) - } toMap - } - - /** - * Load any custom configuration settings defined under the `kamon.metric.instrument-settings` configuration key and - * create InstrumentFactories for them. - * - * @return a Map from category name to InstrumentFactory.
- */ - def loadInstrumentFactories(instrumentSettings: Config, defaults: DefaultInstrumentSettings, refreshScheduler: RefreshScheduler): Map[String, InstrumentFactory] = { - instrumentSettings.firstLevelKeys.map { category ⇒ - val categoryConfig = instrumentSettings.getConfig(category) - val customSettings = categoryConfig.firstLevelKeys.map { instrumentName ⇒ - (instrumentName, InstrumentCustomSettings.fromConfig(categoryConfig.getConfig(instrumentName))) - } toMap - - (category, new InstrumentFactory(customSettings, defaults, refreshScheduler)) - } toMap - } -} diff --git a/kamon-core/src/main/scala/kamon/metric/SubscriptionsDispatcher.scala b/kamon-core/src/main/scala/kamon/metric/SubscriptionsDispatcher.scala deleted file mode 100644 index 09bf58ad..00000000 --- a/kamon-core/src/main/scala/kamon/metric/SubscriptionsDispatcher.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import akka.actor._ -import kamon.metric.SubscriptionsDispatcher._ -import kamon.util.{MilliTimestamp, GlobPathFilter} -import scala.concurrent.duration.FiniteDuration - -/** - * Manages subscriptions to metrics and dispatches snapshots on every tick to all subscribers.
- */ -private[kamon] class SubscriptionsDispatcher(interval: FiniteDuration, metricsExtension: MetricsModuleImpl) extends Actor { - var lastTick = MilliTimestamp.now - var oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter] - var permanentSubscriptions = Map.empty[ActorRef, SubscriptionFilter] - val tickSchedule = context.system.scheduler.schedule(interval, interval, self, Tick)(context.dispatcher) - val collectionContext = metricsExtension.buildDefaultCollectionContext - - def receive = { - case Tick ⇒ processTick() - case Subscribe(filter, subscriber, permanently) ⇒ subscribe(filter, subscriber, permanently) - case Unsubscribe(subscriber) ⇒ unsubscribe(subscriber) - case Terminated(subscriber) ⇒ unsubscribe(subscriber) - } - - def processTick(): Unit = - dispatch(metricsExtension.collectSnapshots(collectionContext)) - - def subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanent: Boolean): Unit = { - def addSubscription(storage: Map[ActorRef, SubscriptionFilter]): Map[ActorRef, SubscriptionFilter] = - storage.updated(subscriber, storage.getOrElse(subscriber, SubscriptionFilter.Empty).combine(filter)) - - context.watch(subscriber) - - if (permanent) - permanentSubscriptions = addSubscription(permanentSubscriptions) - else - oneShotSubscriptions = addSubscription(oneShotSubscriptions) - } - - def unsubscribe(subscriber: ActorRef): Unit = { - permanentSubscriptions = permanentSubscriptions - subscriber - oneShotSubscriptions = oneShotSubscriptions - subscriber - } - - def dispatch(snapshots: Map[Entity, EntitySnapshot]): Unit = { - val currentTick = MilliTimestamp.now - - dispatchSelections(lastTick, currentTick, permanentSubscriptions, snapshots) - dispatchSelections(lastTick, currentTick, oneShotSubscriptions, snapshots) - - lastTick = currentTick - oneShotSubscriptions = Map.empty[ActorRef, SubscriptionFilter] - } - - def dispatchSelections(lastTick: MilliTimestamp, currentTick: MilliTimestamp, subscriptions: Map[ActorRef, SubscriptionFilter], - snapshots: Map[Entity, EntitySnapshot]): Unit = { - - for ((subscriber, filter) ← subscriptions) { - val selection = snapshots.filter(group ⇒ filter.accept(group._1)) - val tickMetrics = TickMetricSnapshot(lastTick, currentTick, selection) - - subscriber ! 
tickMetrics - } - } -} - -object SubscriptionsDispatcher { - def props(interval: FiniteDuration, metricsExtension: MetricsModuleImpl): Props = - Props(new SubscriptionsDispatcher(interval, metricsExtension)) - - case object Tick - case class Unsubscribe(subscriber: ActorRef) - case class Subscribe(filter: SubscriptionFilter, subscriber: ActorRef, permanently: Boolean = false) - case class TickMetricSnapshot(from: MilliTimestamp, to: MilliTimestamp, metrics: Map[Entity, EntitySnapshot]) - -} - -trait SubscriptionFilter { self ⇒ - - def accept(entity: Entity): Boolean - - final def combine(that: SubscriptionFilter): SubscriptionFilter = new SubscriptionFilter { - override def accept(entity: Entity): Boolean = self.accept(entity) || that.accept(entity) - } -} - -object SubscriptionFilter { - val Empty = new SubscriptionFilter { - def accept(entity: Entity): Boolean = false - } - - def apply(category: String, name: String): SubscriptionFilter = new SubscriptionFilter { - val categoryPattern = new GlobPathFilter(category) - val namePattern = new GlobPathFilter(name) - - def accept(entity: Entity): Boolean = { - categoryPattern.accept(entity.category) && namePattern.accept(entity.name) - } - } -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/TickMetricSnapshotBuffer.scala b/kamon-core/src/main/scala/kamon/metric/TickMetricSnapshotBuffer.scala deleted file mode 100644 index 22557974..00000000 --- a/kamon-core/src/main/scala/kamon/metric/TickMetricSnapshotBuffer.scala +++ /dev/null @@ -1,65 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import akka.actor.{Props, Actor, ActorRef} -import kamon.Kamon -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot -import kamon.metric.TickMetricSnapshotBuffer.FlushBuffer -import kamon.metric.instrument.CollectionContext -import kamon.util.MapMerge - -import scala.concurrent.duration.FiniteDuration - -class TickMetricSnapshotBuffer(flushInterval: FiniteDuration, receiver: ActorRef) extends Actor { - import MapMerge.Syntax - - val flushSchedule = context.system.scheduler.schedule(flushInterval, flushInterval, self, FlushBuffer)(context.dispatcher) - val collectionContext: CollectionContext = Kamon.metrics.buildDefaultCollectionContext - - def receive = empty - - def empty: Actor.Receive = { - case tick: TickMetricSnapshot ⇒ context become (buffering(tick)) - case FlushBuffer ⇒ // Nothing to flush. 
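// Editor's note: a minimal usage sketch, not part of the original sources, showing how the
// subscription API and this buffer fit together. The `system` and `reporter` values are
// assumed to exist in the host application, and the glob "**" matches every entity name.

import scala.concurrent.duration._
import akka.actor.{ActorRef, ActorSystem}
import kamon.Kamon
import kamon.metric.TickMetricSnapshotBuffer

val system: ActorSystem = ActorSystem("metrics-example")
val reporter: ActorRef = ??? // a backend actor that understands TickMetricSnapshot messages

// Merge one minute worth of ticks into a single combined snapshot before reporting.
val buffer = system.actorOf(TickMetricSnapshotBuffer.props(1.minute, reporter))

// Permanently route all trace metrics through the buffer.
Kamon.metrics.subscribe("trace", "**", buffer, permanently = true)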
- } - - def buffering(buffered: TickMetricSnapshot): Actor.Receive = { - case TickMetricSnapshot(_, to, tickMetrics) ⇒ - val combinedMetrics = buffered.metrics.merge(tickMetrics, (l, r) ⇒ l.merge(r, collectionContext)) - val combinedSnapshot = TickMetricSnapshot(buffered.from, to, combinedMetrics) - - context become (buffering(combinedSnapshot)) - - case FlushBuffer ⇒ - receiver ! buffered - context become (empty) - - } - - override def postStop(): Unit = { - flushSchedule.cancel() - super.postStop() - } -} - -object TickMetricSnapshotBuffer { - case object FlushBuffer - - def props(flushInterval: FiniteDuration, receiver: ActorRef): Props = - Props[TickMetricSnapshotBuffer](new TickMetricSnapshotBuffer(flushInterval, receiver)) -} diff --git a/kamon-core/src/main/scala/kamon/metric/TraceMetrics.scala b/kamon-core/src/main/scala/kamon/metric/TraceMetrics.scala deleted file mode 100644 index eaeebb97..00000000 --- a/kamon-core/src/main/scala/kamon/metric/TraceMetrics.scala +++ /dev/null @@ -1,53 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import kamon.metric.instrument.{Time, InstrumentFactory} - -class TraceMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) { - - /** - * Records the elapsed time of completed traces, in nanoseconds. - */ - val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds) - val errors = counter("errors") -} - -object TraceMetrics extends EntityRecorderFactory[TraceMetrics] { - def category: String = "trace" - def createRecorder(instrumentFactory: InstrumentFactory): TraceMetrics = new TraceMetrics(instrumentFactory) - - // Java API. - def factory: EntityRecorderFactory[TraceMetrics] = this -} - -class SegmentMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) { - - /** - * Records the elapsed time of completed trace segments, in nanoseconds. - */ - val elapsedTime = histogram("elapsed-time", unitOfMeasurement = Time.Nanoseconds) - val errors = counter("errors") -} - -object SegmentMetrics extends EntityRecorderFactory[SegmentMetrics] { - def category: String = "trace-segment" - def createRecorder(instrumentFactory: InstrumentFactory): SegmentMetrics = new SegmentMetrics(instrumentFactory) - - // Java API.
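// Editor's note: a hedged sketch, not part of the original file, of how these entity
// recorders are used. Kamon.metrics.entity(...) creates the recorder on first use and
// returns the existing one afterwards; the trace name "GET /users" is illustrative.

import kamon.Kamon
import kamon.metric.TraceMetrics

val traceRecorder: TraceMetrics = Kamon.metrics.entity(TraceMetrics, "GET /users")
traceRecorder.elapsedTime.record(42000000L) // the elapsed-time histogram records nanoseconds
traceRecorder.errors.increment()            // the errors counter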
- def factory: EntityRecorderFactory[SegmentMetrics] = this -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/Counter.scala b/kamon-core/src/main/scala/kamon/metric/instrument/Counter.scala deleted file mode 100644 index b7ab60de..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/Counter.scala +++ /dev/null @@ -1,65 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import kamon.jsr166.LongAdder - -trait Counter extends Instrument { - type SnapshotType = Counter.Snapshot - - def increment(): Unit - def increment(times: Long): Unit -} - -object Counter { - - def apply(): Counter = new LongAdderCounter - def create(): Counter = apply() - - trait Snapshot extends InstrumentSnapshot { - def count: Long - def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot - def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot - } -} - -class LongAdderCounter extends Counter { - private val counter = new LongAdder - - def increment(): Unit = counter.increment() - - def increment(times: Long): Unit = { - if (times < 0) - throw new UnsupportedOperationException("Counters cannot be decremented") - counter.add(times) - } - - def collect(context: CollectionContext): Counter.Snapshot = CounterSnapshot(counter.sumAndReset()) - - def cleanup: Unit = {} -} - -case class CounterSnapshot(count: Long) extends Counter.Snapshot { - def merge(that: InstrumentSnapshot, context: CollectionContext): Counter.Snapshot = that match { - case CounterSnapshot(thatCount) ⇒ CounterSnapshot(count + thatCount) - case other ⇒ sys.error(s"Cannot merge a CounterSnapshot with the incompatible [${other.getClass.getName}] type.") - } - - override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Counter.Snapshot = - CounterSnapshot(from.tryScale(to)(count).toLong) - -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/Gauge.scala b/kamon-core/src/main/scala/kamon/metric/instrument/Gauge.scala deleted file mode 100644 index 39571d3d..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/Gauge.scala +++ /dev/null @@ -1,120 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.util.concurrent.atomic.{AtomicLong, AtomicLongFieldUpdater, AtomicReference} - -import akka.actor.Cancellable -import kamon.metric.instrument.Gauge.CurrentValueCollector -import kamon.metric.instrument.Histogram.DynamicRange - -import scala.concurrent.duration.FiniteDuration - -trait Gauge extends Instrument { - type SnapshotType = Histogram.Snapshot - - def record(value: Long): Unit - def record(value: Long, count: Long): Unit - def refreshValue(): Unit -} - -object Gauge { - - def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge = { - val underlyingHistogram = Histogram(dynamicRange) - val gauge = new HistogramBackedGauge(underlyingHistogram, valueCollector) - val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ { - gauge.refreshValue() - }) - - gauge.automaticValueCollectorSchedule.set(refreshValuesSchedule) - gauge - } - - def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler, valueCollector: CurrentValueCollector): Gauge = - apply(dynamicRange, refreshInterval, scheduler, valueCollector) - - trait CurrentValueCollector { - def currentValue: Long - } - - implicit def functionZeroAsCurrentValueCollector(f: () ⇒ Long): CurrentValueCollector = new CurrentValueCollector { - def currentValue: Long = f.apply() - } - - implicit def callByNameLongAsCurrentValueCollector(f: ⇒ Long): CurrentValueCollector = new CurrentValueCollector { - def currentValue: Long = f - } -} - -/** - * Helper for cases in which a gauge shouldn't store the current value of an observed value but rather the difference - * between the current observed value and the previously observed value. Should only be used if the observed value is - * always increasing or staying steady, and is never able to decrease. - * - * Note: The first time a value is collected, this wrapper will always return zero; afterwards, the difference between - * the current value and the last value will be returned.
- */ -class DifferentialValueCollector(wrappedValueCollector: CurrentValueCollector) extends CurrentValueCollector { - @volatile private var _readAtLeastOnce = false - private val _lastObservedValue = new AtomicLong(0) - - def currentValue: Long = { - if (_readAtLeastOnce) { - val wrappedCurrent = wrappedValueCollector.currentValue - val diff = wrappedCurrent - _lastObservedValue.getAndSet(wrappedCurrent) - - if (diff >= 0) diff else 0L - - } else { - _lastObservedValue.set(wrappedValueCollector.currentValue) - _readAtLeastOnce = true - 0L - } - - } -} - -object DifferentialValueCollector { - def apply(wrappedValueCollector: CurrentValueCollector): CurrentValueCollector = - new DifferentialValueCollector(wrappedValueCollector) - - def apply(wrappedValueCollector: ⇒ Long): CurrentValueCollector = - new DifferentialValueCollector(new CurrentValueCollector { - def currentValue: Long = wrappedValueCollector - }) -} - -class HistogramBackedGauge(underlyingHistogram: Histogram, currentValueCollector: Gauge.CurrentValueCollector) extends Gauge { - private[kamon] val automaticValueCollectorSchedule = new AtomicReference[Cancellable]() - - def record(value: Long): Unit = underlyingHistogram.record(value) - - def record(value: Long, count: Long): Unit = underlyingHistogram.record(value, count) - - def collect(context: CollectionContext): Histogram.Snapshot = underlyingHistogram.collect(context) - - def cleanup: Unit = { - if (automaticValueCollectorSchedule.get() != null) - automaticValueCollectorSchedule.get().cancel() - } - - def refreshValue(): Unit = - underlyingHistogram.record(currentValueCollector.currentValue) - -} - diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/Histogram.scala b/kamon-core/src/main/scala/kamon/metric/instrument/Histogram.scala deleted file mode 100644 index 399f0880..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/Histogram.scala +++ /dev/null @@ -1,331 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.nio.LongBuffer - -import kamon.metric.instrument.Histogram.{DynamicRange, Snapshot} -import kamon.util.logger.LazyLogger -import org.HdrHistogram.ModifiedAtomicHistogram - -trait Histogram extends Instrument { - type SnapshotType = Histogram.Snapshot - - def record(value: Long): Unit - def record(value: Long, count: Long): Unit -} - -object Histogram { - - /** - * Scala API: - * - * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given - * [[kamon.metric.instrument.Histogram.DynamicRange]]. 
- */ - def apply(dynamicRange: DynamicRange): Histogram = new HdrHistogram(dynamicRange) - - /** - * Java API: - * - * Create a new High Dynamic Range Histogram ([[kamon.metric.instrument.HdrHistogram]]) using the given - * [[kamon.metric.instrument.Histogram.DynamicRange]]. - */ - def create(dynamicRange: DynamicRange): Histogram = apply(dynamicRange) - - /** - * DynamicRange is a configuration object used to supply range and precision configuration to a - * [[kamon.metric.instrument.HdrHistogram]]. See the [[http://hdrhistogram.github.io/HdrHistogram/ HdrHistogram website]] - * for more details on how it works and the effects of these configuration values. - * - * @param lowestDiscernibleValue - * The lowest value that can be discerned (distinguished from 0) by the histogram. Must be a positive integer that - * is >= 1. May be internally rounded down to the nearest power of 2. - * @param highestTrackableValue - * The highest value to be tracked by the histogram. Must be a positive integer that is >= (2 * lowestDiscernibleValue). - * Must not be larger than (Long.MAX_VALUE/2). - * @param precision - * The number of significant decimal digits to which the histogram will maintain value resolution and separation. - * Must be an integer between 1 and 3. - */ - case class DynamicRange(lowestDiscernibleValue: Long, highestTrackableValue: Long, precision: Int) - - trait Record { - def level: Long - def count: Long - - private[kamon] def rawCompactRecord: Long - } - - case class MutableRecord(var level: Long, var count: Long) extends Record { - var rawCompactRecord: Long = 0L - } - - trait Snapshot extends InstrumentSnapshot { - - def isEmpty: Boolean = numberOfMeasurements == 0 - def numberOfMeasurements: Long - def min: Long - def max: Long - def sum: Long - def percentile(percentile: Double): Long - def recordsIterator: Iterator[Record] - def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot - def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot - - override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot = - new ScaledSnapshot(from, to, this) - } - - class ScaledSnapshot(from: UnitOfMeasurement, to: UnitOfMeasurement, snapshot: Snapshot) extends Snapshot { - private def doScale(v: Long) = from.tryScale(to)(v).toLong - override def numberOfMeasurements: Long = snapshot.numberOfMeasurements - - override def max: Long = doScale(snapshot.max) - - override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = snapshot.merge(that, context) - - override def merge(that: Snapshot, context: CollectionContext): Snapshot = snapshot.merge(that, context) - - override def percentile(percentile: Double): Long = doScale(snapshot.percentile(percentile)) - - override def min: Long = doScale(snapshot.min) - - override def sum: Long = doScale(snapshot.sum) - - override def recordsIterator: Iterator[Record] = { - snapshot.recordsIterator.map(record ⇒ new Record { - override def count: Long = record.count - - override def level: Long = doScale(record.level) - - override private[kamon] def rawCompactRecord: Long = record.rawCompactRecord - }) - } - - override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot = - if (this.from == from && this.to == to) this else super.scale(from, to) - } - - object Snapshot { - val empty = new Snapshot { - override def min: Long = 0L - override def max: Long = 0L - override def sum: Long = 0L - override def
percentile(percentile: Double): Long = 0L - override def recordsIterator: Iterator[Record] = Iterator.empty - override def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot = that - override def merge(that: Histogram.Snapshot, context: CollectionContext): Histogram.Snapshot = that - override def numberOfMeasurements: Long = 0L - override def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): Histogram.Snapshot = this - } - } -} - -object HdrHistogram { - private val log = LazyLogger(classOf[HdrHistogram]) -} - -/** - * This implementation is meant to be used for real-time data collection where data snapshots are taken often over time. - * The collect(..) operation extracts all the recorded values from the histogram and resets the counts, but still - * leaves it in a consistent state even in the case of concurrent modification while the snapshot is being taken. - */ -class HdrHistogram(dynamicRange: DynamicRange) extends ModifiedAtomicHistogram( - dynamicRange.lowestDiscernibleValue, - dynamicRange.highestTrackableValue, dynamicRange.precision -) with Histogram { - import HdrHistogram.log - - def record(value: Long): Unit = tryRecord(value, 1L) - - def record(value: Long, count: Long): Unit = tryRecord(value, count) - - private def tryRecord(value: Long, count: Long): Unit = { - try { - recordValueWithCount(value, count) - } catch { - case anyException: Throwable ⇒ - log.warn(s"Failed to store value $value in HdrHistogram, please review your range configuration.", anyException) - } - } - - def collect(context: CollectionContext): Histogram.Snapshot = { - import context.buffer - buffer.clear() - val nrOfMeasurements = writeSnapshotTo(buffer) - - buffer.flip() - - val measurementsArray = Array.ofDim[Long](buffer.limit()) - buffer.get(measurementsArray, 0, measurementsArray.length) - new CompactHdrSnapshot(nrOfMeasurements, measurementsArray, protectedUnitMagnitude(), protectedSubBucketHalfCount(), protectedSubBucketHalfCountMagnitude()) - } - - def getCounts = countsArray().length() - - def cleanup: Unit = {} - - private def writeSnapshotTo(buffer: LongBuffer): Long = { - val counts = countsArray() - val countsLength = counts.length() - - var nrOfMeasurements = 0L - var index = 0L - while (index < countsLength) { - val countAtIndex = counts.getAndSet(index.toInt, 0L) - - if (countAtIndex > 0) { - buffer.put(CompactHdrSnapshot.compactRecord(index, countAtIndex)) - nrOfMeasurements += countAtIndex - } - - index += 1 - } - nrOfMeasurements - } -} - -case class CompactHdrSnapshot(numberOfMeasurements: Long, compactRecords: Array[Long], unitMagnitude: Int, - subBucketHalfCount: Int, subBucketHalfCountMagnitude: Int) extends Histogram.Snapshot { - - def min: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(0)) - def max: Long = if (compactRecords.length == 0) 0 else levelFromCompactRecord(compactRecords(compactRecords.length - 1)) - def sum: Long = recordsIterator.foldLeft(0L)((a, r) ⇒ a + (r.count * r.level)) - - def percentile(p: Double): Long = { - val records = recordsIterator - val threshold = numberOfMeasurements * (p / 100D) - var countToCurrentLevel = 0L - var percentileLevel = 0L - - while (countToCurrentLevel < threshold && records.hasNext) { - val record = records.next() - countToCurrentLevel += record.count - percentileLevel = record.level - } - - percentileLevel - } - - def merge(that: Histogram.Snapshot, context: CollectionContext): Snapshot = - merge(that.asInstanceOf[InstrumentSnapshot], context) - - def merge(that:
InstrumentSnapshot, context: CollectionContext): Histogram.Snapshot = that match { - case thatSnapshot: CompactHdrSnapshot ⇒ - if (thatSnapshot.isEmpty) this else if (this.isEmpty) thatSnapshot else { - import context.buffer - buffer.clear() - - val selfIterator = recordsIterator - val thatIterator = thatSnapshot.recordsIterator - var thatCurrentRecord: Histogram.Record = null - var mergedNumberOfMeasurements = 0L - - def nextOrNull(iterator: Iterator[Histogram.Record]): Histogram.Record = if (iterator.hasNext) iterator.next() else null - def addToBuffer(compactRecord: Long): Unit = { - mergedNumberOfMeasurements += countFromCompactRecord(compactRecord) - buffer.put(compactRecord) - } - - while (selfIterator.hasNext) { - val selfCurrentRecord = selfIterator.next() - - // Advance `that` no further than the level of selfCurrentRecord - thatCurrentRecord = if (thatCurrentRecord == null) nextOrNull(thatIterator) else thatCurrentRecord - while (thatCurrentRecord != null && thatCurrentRecord.level < selfCurrentRecord.level) { - addToBuffer(thatCurrentRecord.rawCompactRecord) - thatCurrentRecord = nextOrNull(thatIterator) - } - - // Include the current record of self, merging it if it has the same level as thatCurrentRecord - if (thatCurrentRecord != null && thatCurrentRecord.level == selfCurrentRecord.level) { - addToBuffer(mergeCompactRecords(thatCurrentRecord.rawCompactRecord, selfCurrentRecord.rawCompactRecord)) - thatCurrentRecord = nextOrNull(thatIterator) - } else { - addToBuffer(selfCurrentRecord.rawCompactRecord) - } - } - - // Include everything that might remain in `that` - if (thatCurrentRecord != null) addToBuffer(thatCurrentRecord.rawCompactRecord) - while (thatIterator.hasNext) { - addToBuffer(thatIterator.next().rawCompactRecord) - } - - buffer.flip() - val compactRecords = Array.ofDim[Long](buffer.limit()) - buffer.get(compactRecords) - - new CompactHdrSnapshot(mergedNumberOfMeasurements, compactRecords, unitMagnitude, subBucketHalfCount, subBucketHalfCountMagnitude) - } - - case other ⇒ - sys.error(s"Cannot merge a CompactHdrSnapshot with the incompatible [${other.getClass.getName}] type.") - - } - - @inline private def mergeCompactRecords(left: Long, right: Long): Long = { - val index = left >> 48 - val leftCount = countFromCompactRecord(left) - val rightCount = countFromCompactRecord(right) - - CompactHdrSnapshot.compactRecord(index, leftCount + rightCount) - } - - @inline private def levelFromCompactRecord(compactRecord: Long): Long = { - val countsArrayIndex = (compactRecord >> 48).toInt - var bucketIndex: Int = (countsArrayIndex >> subBucketHalfCountMagnitude) - 1 - var subBucketIndex: Int = (countsArrayIndex & (subBucketHalfCount - 1)) + subBucketHalfCount - if (bucketIndex < 0) { - subBucketIndex -= subBucketHalfCount - bucketIndex = 0 - } - - subBucketIndex.toLong << (bucketIndex + unitMagnitude) - } - - @inline private def countFromCompactRecord(compactRecord: Long): Long = - compactRecord & CompactHdrSnapshot.CompactRecordCountMask - - def recordsIterator: Iterator[Histogram.Record] = new Iterator[Histogram.Record] { - var currentIndex = 0 - val mutableRecord = Histogram.MutableRecord(0, 0) - - override def hasNext: Boolean = currentIndex < compactRecords.length - - override def next(): Histogram.Record = { - if (hasNext) { - val measurement = compactRecords(currentIndex) - mutableRecord.rawCompactRecord = measurement - mutableRecord.level = levelFromCompactRecord(measurement) - mutableRecord.count = countFromCompactRecord(measurement) - currentIndex +=
1 - - mutableRecord - } else { - throw new IllegalStateException("The iterator has already been consumed.") - } - } - } -} - -object CompactHdrSnapshot { - val CompactRecordCountMask = 0xFFFFFFFFFFFFL - - def compactRecord(index: Long, count: Long): Long = (index << 48) | count -} diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/Instrument.scala b/kamon-core/src/main/scala/kamon/metric/instrument/Instrument.scala deleted file mode 100644 index 2c4b4319..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/Instrument.scala +++ /dev/null @@ -1,51 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.nio.LongBuffer - -private[kamon] trait Instrument { - type SnapshotType <: InstrumentSnapshot - - def collect(context: CollectionContext): SnapshotType - def cleanup: Unit -} - -trait InstrumentSnapshot { - def merge(that: InstrumentSnapshot, context: CollectionContext): InstrumentSnapshot - - def scale(from: UnitOfMeasurement, to: UnitOfMeasurement): InstrumentSnapshot -} - -trait CollectionContext { - def buffer: LongBuffer -} - -object CollectionContext { - def apply(longBufferSize: Int): CollectionContext = new CollectionContext { - val buffer: LongBuffer = LongBuffer.allocate(longBufferSize) - } -} - -sealed trait InstrumentType - -object InstrumentTypes { - case object Histogram extends InstrumentType - case object MinMaxCounter extends InstrumentType - case object Gauge extends InstrumentType - case object Counter extends InstrumentType -} diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentFactory.scala b/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentFactory.scala deleted file mode 100644 index 7c0201f7..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentFactory.scala +++ /dev/null @@ -1,51 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric.instrument - -import kamon.metric.instrument.Gauge.CurrentValueCollector -import kamon.metric.instrument.Histogram.DynamicRange - -import scala.concurrent.duration.FiniteDuration - -case class InstrumentFactory(configurations: Map[String, InstrumentCustomSettings], defaults: DefaultInstrumentSettings, scheduler: RefreshScheduler) { - - private def resolveSettings(instrumentName: String, codeSettings: Option[InstrumentSettings], default: InstrumentSettings): InstrumentSettings = { - configurations.get(instrumentName).flatMap { customSettings ⇒ - codeSettings.map(cs ⇒ customSettings.combine(cs)) orElse (Some(customSettings.combine(default))) - - } getOrElse (codeSettings.getOrElse(default)) - } - - def createHistogram(name: String, dynamicRange: Option[DynamicRange] = None): Histogram = { - val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, None)), defaults.histogram) - Histogram(settings.dynamicRange) - } - - def createMinMaxCounter(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None): MinMaxCounter = { - val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.minMaxCounter) - MinMaxCounter(settings.dynamicRange, settings.refreshInterval.get, scheduler) - } - - def createGauge(name: String, dynamicRange: Option[DynamicRange] = None, refreshInterval: Option[FiniteDuration] = None, - valueCollector: CurrentValueCollector): Gauge = { - - val settings = resolveSettings(name, dynamicRange.map(dr ⇒ InstrumentSettings(dr, refreshInterval)), defaults.gauge) - Gauge(settings.dynamicRange, settings.refreshInterval.get, scheduler, valueCollector) - } - - def createCounter(): Counter = Counter() -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentSettings.scala b/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentSettings.scala deleted file mode 100644 index e4d6f547..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/InstrumentSettings.scala +++ /dev/null @@ -1,73 +0,0 @@ -package kamon.metric.instrument - -import com.typesafe.config.Config -import kamon.metric.instrument.Histogram.DynamicRange - -import scala.concurrent.duration.FiniteDuration - -case class InstrumentCustomSettings(lowestDiscernibleValue: Option[Long], highestTrackableValue: Option[Long], - precision: Option[Int], refreshInterval: Option[FiniteDuration]) { - - def combine(that: InstrumentSettings): InstrumentSettings = - InstrumentSettings( - DynamicRange( - lowestDiscernibleValue.getOrElse(that.dynamicRange.lowestDiscernibleValue), - highestTrackableValue.getOrElse(that.dynamicRange.highestTrackableValue), - precision.getOrElse(that.dynamicRange.precision) - ), - refreshInterval.orElse(that.refreshInterval) - ) -} - -object InstrumentCustomSettings { - import kamon.util.ConfigTools.Syntax - - def fromConfig(config: Config): InstrumentCustomSettings = - InstrumentCustomSettings( - if (config.hasPath("lowest-discernible-value")) Some(config.getLong("lowest-discernible-value")) else None, - if (config.hasPath("highest-trackable-value")) Some(config.getLong("highest-trackable-value")) else None, - if (config.hasPath("precision")) Some(InstrumentSettings.parsePrecision(config.getString("precision"))) else None, - if (config.hasPath("refresh-interval")) Some(config.getFiniteDuration("refresh-interval")) else 
None - ) - -} - -case class InstrumentSettings(dynamicRange: DynamicRange, refreshInterval: Option[FiniteDuration]) - -object InstrumentSettings { - - def readDynamicRange(config: Config): DynamicRange = - DynamicRange( - config.getLong("lowest-discernible-value"), - config.getLong("highest-trackable-value"), - parsePrecision(config.getString("precision")) - ) - - def parsePrecision(stringValue: String): Int = stringValue match { - case "low" ⇒ 1 - case "normal" ⇒ 2 - case "fine" ⇒ 3 - case other ⇒ sys.error(s"Invalid precision configuration [$other] found, valid options are: [low|normal|fine].") - } -} - -case class DefaultInstrumentSettings(histogram: InstrumentSettings, minMaxCounter: InstrumentSettings, gauge: InstrumentSettings) - -object DefaultInstrumentSettings { - - def fromConfig(config: Config): DefaultInstrumentSettings = { - import kamon.util.ConfigTools.Syntax - - val histogramSettings = InstrumentSettings(InstrumentSettings.readDynamicRange(config.getConfig("histogram")), None) - val minMaxCounterSettings = InstrumentSettings( - InstrumentSettings.readDynamicRange(config.getConfig("min-max-counter")), - Some(config.getFiniteDuration("min-max-counter.refresh-interval")) - ) - val gaugeSettings = InstrumentSettings( - InstrumentSettings.readDynamicRange(config.getConfig("gauge")), - Some(config.getFiniteDuration("gauge.refresh-interval")) - ) - - DefaultInstrumentSettings(histogramSettings, minMaxCounterSettings, gaugeSettings) - } -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/MinMaxCounter.scala b/kamon-core/src/main/scala/kamon/metric/instrument/MinMaxCounter.scala deleted file mode 100644 index 76fc2c2a..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/MinMaxCounter.scala +++ /dev/null @@ -1,105 +0,0 @@ -package kamon.metric.instrument - -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -import java.lang.Math.abs -import java.util.concurrent.atomic.AtomicReference -import akka.actor.Cancellable -import kamon.jsr166.LongMaxUpdater -import kamon.metric.instrument.Histogram.DynamicRange -import kamon.util.PaddedAtomicLong -import scala.concurrent.duration.FiniteDuration - -trait MinMaxCounter extends Instrument { - override type SnapshotType = Histogram.Snapshot - - def increment(): Unit - def increment(times: Long): Unit - def decrement(): Unit - def decrement(times: Long): Unit - def refreshValues(): Unit -} - -object MinMaxCounter { - - def apply(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter = { - val underlyingHistogram = Histogram(dynamicRange) - val minMaxCounter = new PaddedMinMaxCounter(underlyingHistogram) - val refreshValuesSchedule = scheduler.schedule(refreshInterval, () ⇒ { - minMaxCounter.refreshValues() - }) - - minMaxCounter.refreshValuesSchedule.set(refreshValuesSchedule) - minMaxCounter - } - - def create(dynamicRange: DynamicRange, refreshInterval: FiniteDuration, scheduler: RefreshScheduler): MinMaxCounter = - apply(dynamicRange, refreshInterval, scheduler) - -} - -class PaddedMinMaxCounter(underlyingHistogram: Histogram) extends MinMaxCounter { - private val min = new LongMaxUpdater(0L) - private val max = new LongMaxUpdater(0L) - private val sum = new PaddedAtomicLong - val refreshValuesSchedule = new AtomicReference[Cancellable]() - - def increment(): Unit = increment(1L) - - def increment(times: Long): Unit = { - val currentValue = sum.addAndGet(times) - max.update(currentValue) - } - - def decrement(): Unit = decrement(1L) - - def decrement(times: Long): Unit = { - val currentValue = sum.addAndGet(-times) - min.update(-currentValue) - } - - def collect(context: CollectionContext): Histogram.Snapshot = { - refreshValues() - underlyingHistogram.collect(context) - } - - def cleanup: Unit = { - if (refreshValuesSchedule.get() != null) - refreshValuesSchedule.get().cancel() - } - - def refreshValues(): Unit = { - val currentValue = { - val value = sum.get() - if (value <= 0) 0 else value - } - - val currentMin = { - val rawMin = min.maxThenReset(-currentValue) - if (rawMin >= 0) - 0 - else - abs(rawMin) - } - - val currentMax = max.maxThenReset(currentValue) - - underlyingHistogram.record(currentValue) - underlyingHistogram.record(currentMin) - underlyingHistogram.record(currentMax) - } -} diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala b/kamon-core/src/main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala deleted file mode 100644 index eb01d114..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/ModifiedAtomicHistogram.scala +++ /dev/null @@ -1,31 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. 
See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package org.HdrHistogram - -import java.util.concurrent.atomic.AtomicLongArray - -abstract class ModifiedAtomicHistogram(low: Long, high: Long, precision: Int) - extends AtomicHistogram(low, high, precision) { self ⇒ - - override def incrementTotalCount(): Unit = {} - override def addToTotalCount(value: Long): Unit = {} - - def countsArray(): AtomicLongArray = counts - def protectedUnitMagnitude(): Int = unitMagnitude - def protectedSubBucketHalfCount(): Int = subBucketHalfCount - def protectedSubBucketHalfCountMagnitude(): Int = subBucketHalfCountMagnitude -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/RefreshScheduler.scala b/kamon-core/src/main/scala/kamon/metric/instrument/RefreshScheduler.scala deleted file mode 100644 index 6bc02dc3..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/RefreshScheduler.scala +++ /dev/null @@ -1,115 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import akka.actor.{Scheduler, Cancellable} -import org.HdrHistogram.WriterReaderPhaser - -import scala.collection.concurrent.TrieMap -import scala.concurrent.ExecutionContext -import scala.concurrent.duration.FiniteDuration - -trait RefreshScheduler { - def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable -} - -/** - * Default implementation of RefreshScheduler that simply uses an [[akka.actor.Scheduler]] to schedule tasks to be run - * in the provided ExecutionContext. - */ -class DefaultRefreshScheduler(scheduler: Scheduler, dispatcher: ExecutionContext) extends RefreshScheduler { - def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable = - scheduler.schedule(interval, interval)(refresh.apply())(dispatcher) -} - -object DefaultRefreshScheduler { - def apply(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler = - new DefaultRefreshScheduler(scheduler, dispatcher) - - def create(scheduler: Scheduler, dispatcher: ExecutionContext): RefreshScheduler = - apply(scheduler, dispatcher) -} - -/** - * RefreshScheduler implementation that accumulates all the scheduled actions until it is pointed to another refresh - * scheduler. Once it is pointed, all subsequent calls to `schedule` will immediately be scheduled in the pointed - * scheduler. 
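 * For example (a sketch; `lazyScheduler` is a hypothetical instance): instruments can register refresh tasks
 * against a LazyRefreshScheduler before any ActorSystem exists, and the backlog is later flushed with
 * `lazyScheduler.point(DefaultRefreshScheduler(system.scheduler, system.dispatcher))`.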
- */ -class LazyRefreshScheduler extends RefreshScheduler { - private val _schedulerPhaser = new WriterReaderPhaser - private val _backlog = new TrieMap[(FiniteDuration, () ⇒ Unit), RepointableCancellable]() - @volatile private var _target: Option[RefreshScheduler] = None - - def schedule(interval: FiniteDuration, refresh: () ⇒ Unit): Cancellable = { - val criticalEnter = _schedulerPhaser.writerCriticalSectionEnter() - try { - _target.map { scheduler ⇒ - scheduler.schedule(interval, refresh) - - } getOrElse { - val entry = (interval, refresh) - val cancellable = new RepointableCancellable(entry) - - _backlog.put(entry, cancellable) - cancellable - } - - } finally { - _schedulerPhaser.writerCriticalSectionExit(criticalEnter) - } - } - - def point(target: RefreshScheduler): Unit = try { - _schedulerPhaser.readerLock() - - if (_target.isEmpty) { - _target = Some(target) - _schedulerPhaser.flipPhase(10000L) - _backlog.dropWhile { - case ((interval, refresh), repointableCancellable) ⇒ - repointableCancellable.point(target.schedule(interval, refresh)) - true - } - } else sys.error("A LazyRefreshScheduler cannot be pointed more than once.") - } finally { _schedulerPhaser.readerUnlock() } - - class RepointableCancellable(entry: (FiniteDuration, () ⇒ Unit)) extends Cancellable { - private var _isCancelled = false - private var _cancellable: Option[Cancellable] = None - - def isCancelled: Boolean = synchronized { - _cancellable.map(_.isCancelled).getOrElse(_isCancelled) - } - - def cancel(): Boolean = synchronized { - _isCancelled = true - _cancellable.map(_.cancel()).getOrElse(_backlog.remove(entry).nonEmpty) - } - - def point(cancellable: Cancellable): Unit = synchronized { - if (_cancellable.isEmpty) { - _cancellable = Some(cancellable) - - if (_isCancelled) - cancellable.cancel() - - } else sys.error("A RepointableCancellable cannot be pointed more than once.") - - } - } -} - diff --git a/kamon-core/src/main/scala/kamon/metric/instrument/UnitOfMeasurement.scala b/kamon-core/src/main/scala/kamon/metric/instrument/UnitOfMeasurement.scala deleted file mode 100644 index 5952b906..00000000 --- a/kamon-core/src/main/scala/kamon/metric/instrument/UnitOfMeasurement.scala +++ /dev/null @@ -1,109 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -/** - * A UnitOfMeasurement implementation describes the magnitude of a quantity being measured, such as Time and computer - * Memory space. Kamon uses UnitOfMeasurement implementations just as an informative companion to metrics inside entity - * recorders and might be used to scale certain kinds of measurements in metric backends.
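 * For example, with the Time definitions below, `Time.Milliseconds.scale(Time.Seconds)(5000.0)` yields `5.0`,
 * while `tryScale` between a Time and a Memory unit throws an IllegalArgumentException.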
- */ -trait UnitOfMeasurement { - type U <: UnitOfMeasurement - - def name: String - def label: String - def scale(toUnit: U)(value: Double): Double = value - - def tryScale(toUnit: UnitOfMeasurement)(value: Double): Double = - if (canScale(toUnit)) scale(toUnit.asInstanceOf[U])(value) - else throw new IllegalArgumentException(s"Can't scale different types of units `$name` and `${toUnit.name}`") - - protected def canScale(toUnit: UnitOfMeasurement): Boolean - -} - -object UnitOfMeasurement { - case object Unknown extends UnitOfMeasurement { - override type U = Unknown.type - val name = "unknown" - val label = "unknown" - - override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isUnknown(toUnit) - } - - def isUnknown(uom: UnitOfMeasurement): Boolean = - uom == Unknown - - def isTime(uom: UnitOfMeasurement): Boolean = - uom.isInstanceOf[Time] - - def isMemory(uom: UnitOfMeasurement): Boolean = - uom.isInstanceOf[Memory] - -} - -/** - * UnitOfMeasurement representing time. - */ -case class Time(factor: Double, label: String) extends UnitOfMeasurement { - override type U = Time - val name = "time" - - override def scale(toUnit: Time)(value: Double): Double = - (value * factor) / toUnit.factor - - override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isTime(toUnit) -} - -object Time { - val Nanoseconds = Time(1E-9, "n") - val Microseconds = Time(1E-6, "µs") - val Milliseconds = Time(1E-3, "ms") - val Seconds = Time(1, "s") - - val units = List(Nanoseconds, Microseconds, Milliseconds, Seconds) - - def apply(time: String): Time = units.find(_.label.toLowerCase == time.toLowerCase) getOrElse { - throw new IllegalArgumentException(s"Can't recognize time unit '$time'") - } -} - -/** - * UnitOfMeasurement representing computer memory space. - */ -case class Memory(factor: Double, label: String) extends UnitOfMeasurement { - override type U = Memory - val name = "bytes" - - override def scale(toUnit: Memory)(value: Double): Double = - (value * factor) / toUnit.factor - - override protected def canScale(toUnit: UnitOfMeasurement): Boolean = UnitOfMeasurement.isMemory(toUnit) -} - -object Memory { - val Bytes = Memory(1, "b") - val KiloBytes = Memory(1024, "Kb") - val MegaBytes = Memory(1024 * 1024, "Mb") - val GigaBytes = Memory(1024 * 1024 * 1024, "Gb") - - val units = List(Bytes, KiloBytes, MegaBytes, GigaBytes) - - def apply(memory: String): Memory = units.find(_.label.toLowerCase == memory.toLowerCase) getOrElse { - throw new IllegalArgumentException(s"Can't recognize memory unit '$memory'") - } -} diff --git a/kamon-core/src/main/scala/kamon/trace/Incubator.scala b/kamon-core/src/main/scala/kamon/trace/Incubator.scala deleted file mode 100644 index fe5ad569..00000000 --- a/kamon-core/src/main/scala/kamon/trace/Incubator.scala +++ /dev/null @@ -1,94 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. 
See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import akka.actor.{ActorLogging, Props, Actor, ActorRef} -import kamon.trace.Incubator.{CheckForCompletedTraces, IncubatingTrace} -import kamon.util.{NanoInterval, RelativeNanoTimestamp} -import scala.annotation.tailrec -import scala.collection.immutable.Queue -import kamon.util.ConfigTools.Syntax - -class Incubator(subscriptions: ActorRef) extends Actor with ActorLogging { - import context.dispatcher - val config = context.system.settings.config.getConfig("kamon.trace.incubator") - - val minIncubationTime = new NanoInterval(config.getFiniteDuration("min-incubation-time").toNanos) - val maxIncubationTime = new NanoInterval(config.getFiniteDuration("max-incubation-time").toNanos) - val checkInterval = config.getFiniteDuration("check-interval") - - val checkSchedule = context.system.scheduler.schedule(checkInterval, checkInterval, self, CheckForCompletedTraces) - var waitingForMinimumIncubation = Queue.empty[IncubatingTrace] - var waitingForIncubationFinish = List.empty[IncubatingTrace] - - def receive = { - case tc: TracingContext ⇒ incubate(tc) - case CheckForCompletedTraces ⇒ - checkWaitingForMinimumIncubation() - checkWaitingForIncubationFinish() - } - - def incubate(tc: TracingContext): Unit = - waitingForMinimumIncubation = waitingForMinimumIncubation.enqueue(IncubatingTrace(tc, RelativeNanoTimestamp.now)) - - @tailrec private def checkWaitingForMinimumIncubation(): Unit = { - if (waitingForMinimumIncubation.nonEmpty) { - val it = waitingForMinimumIncubation.head - if (NanoInterval.since(it.incubationStart) >= minIncubationTime) { - waitingForMinimumIncubation = waitingForMinimumIncubation.tail - - if (it.tc.shouldIncubate) - waitingForIncubationFinish = it :: waitingForIncubationFinish - else - dispatchTraceInfo(it.tc) - - checkWaitingForMinimumIncubation() - } - } - } - - private def checkWaitingForIncubationFinish(): Unit = { - waitingForIncubationFinish = waitingForIncubationFinish.filter { - case IncubatingTrace(context, incubationStart) ⇒ - if (!context.shouldIncubate) { - dispatchTraceInfo(context) - false - } else { - if (NanoInterval.since(incubationStart) >= maxIncubationTime) { - log.warning("Trace [{}] with token [{}] has reached the maximum incubation time, will be reported as is.", context.name, context.token) - dispatchTraceInfo(context); - false - } else true - } - } - } - - def dispatchTraceInfo(tc: TracingContext): Unit = subscriptions ! tc.generateTraceInfo - - override def postStop(): Unit = { - super.postStop() - checkSchedule.cancel() - } -} - -object Incubator { - - def props(subscriptions: ActorRef): Props = Props(new Incubator(subscriptions)) - - case object CheckForCompletedTraces - case class IncubatingTrace(tc: TracingContext, incubationStart: RelativeNanoTimestamp) -} diff --git a/kamon-core/src/main/scala/kamon/trace/MetricsOnlyContext.scala b/kamon-core/src/main/scala/kamon/trace/MetricsOnlyContext.scala deleted file mode 100644 index bc0bedba..00000000 --- a/kamon-core/src/main/scala/kamon/trace/MetricsOnlyContext.scala +++ /dev/null @@ -1,186 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import java.util.concurrent.ConcurrentLinkedQueue - -import akka.event.LoggingAdapter -import kamon.Kamon -import kamon.metric.{SegmentMetrics, TraceMetrics} -import kamon.util.{NanoInterval, RelativeNanoTimestamp} - -import scala.annotation.tailrec -import scala.collection.concurrent.TrieMap - -private[kamon] class MetricsOnlyContext( - traceName: String, - val token: String, - traceTags: Map[String, String], - currentStatus: Status, - val levelOfDetail: LevelOfDetail, - val startTimestamp: RelativeNanoTimestamp, - log: LoggingAdapter -) extends TraceContext { - - @volatile private var _name = traceName - @volatile private var _status = currentStatus - @volatile protected var _elapsedTime = NanoInterval.default - - private val _finishedSegments = new ConcurrentLinkedQueue[SegmentLatencyData]() - private val _traceLocalStorage = new TraceLocalStorage - private val _tags = TrieMap.empty[String, String] ++= traceTags - - def rename(newName: String): Unit = - if (Status.Open == status) - _name = newName - else - log.warning("Can't rename trace from [{}] to [{}] because the trace is already closed.", name, newName) - - def name: String = _name - def tags: Map[String, String] = _tags.toMap - def isEmpty: Boolean = false - def status: Status = _status - def addMetadata(key: String, value: String): Unit = {} - def addTag(key: String, value: String): Unit = _tags.put(key, value) - def removeTag(key: String, value: String): Boolean = _tags.remove(key, value) - - private def finish(withError: Boolean): Unit = { - _status = if (withError) Status.FinishedWithError else Status.FinishedSuccessfully - val traceElapsedTime = NanoInterval.since(startTimestamp) - _elapsedTime = traceElapsedTime - - if (Kamon.metrics.shouldTrack(name, TraceMetrics.category)) { - val traceEntity = Kamon.metrics.entity(TraceMetrics, name, _tags.toMap) - traceEntity.elapsedTime.record(traceElapsedTime.nanos) - if (withError) traceEntity.errors.increment() - } - drainFinishedSegments() - } - - def finish(): Unit = finish(withError = false) - - def finishWithError(cause: Throwable): Unit = { - // We should do something with the Throwable in the near future. - finish(withError = true) - } - - def startSegment(segmentName: String, category: String, library: String): Segment = - startSegment(segmentName, category, library, Map.empty[String, String]) - - def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = - new MetricsOnlySegment(segmentName, category, library, tags) - - @tailrec private def drainFinishedSegments(): Unit = { - val segment = _finishedSegments.poll() - if (segment != null) { - val defaultTags = Map( - "trace" → name, - "category" → segment.category, - "library" → segment.library - ) - - if (Kamon.metrics.shouldTrack(segment.name, SegmentMetrics.category)) { - val segmentEntity = Kamon.metrics.entity(SegmentMetrics, segment.name, defaultTags ++ segment.tags) - segmentEntity.elapsedTime.record(segment.duration.nanos) - if
(segment.isFinishedWithError) segmentEntity.errors.increment() - } - drainFinishedSegments() - } - } - - protected def finishSegment( - segmentName: String, - category: String, - library: String, - duration: NanoInterval, - segmentTags: Map[String, String], - isFinishedWithError: Boolean - ): Unit = { - - _finishedSegments.add(SegmentLatencyData(segmentName, category, library, duration, segmentTags, isFinishedWithError)) - - if (isClosed) { - drainFinishedSegments() - } - } - - // Should only be used by the TraceLocal utilities. - def traceLocalStorage: TraceLocalStorage = _traceLocalStorage - - // Handle with care and make sure that the trace is closed before calling this method, otherwise NanoInterval.default - // will be returned. - def elapsedTime: NanoInterval = _elapsedTime - - class MetricsOnlySegment( - segmentName: String, - val category: String, - val library: String, - segmentTags: Map[String, String] - ) extends Segment { - - private val _startTimestamp = RelativeNanoTimestamp.now - private val _tags = TrieMap.empty[String, String] ++= segmentTags - - @volatile private var _segmentName = segmentName - @volatile private var _elapsedTime = NanoInterval.default - @volatile private var _status: Status = Status.Open - - def name: String = _segmentName - def tags: Map[String, String] = _tags.toMap - def isEmpty: Boolean = false - def status: Status = _status - def addMetadata(key: String, value: String): Unit = {} - def addTag(key: String, value: String): Unit = _tags.put(key, value) - def removeTag(key: String, value: String): Boolean = _tags.remove(key, value) - - def rename(newName: String): Unit = - if (Status.Open == status) - _segmentName = newName - else - log.warning("Can't rename segment from [{}] to [{}] because the segment is already closed.", name, newName) - - private def finish(withError: Boolean): Unit = { - _status = if (withError) Status.FinishedWithError else Status.FinishedSuccessfully - val segmentElapsedTime = NanoInterval.since(_startTimestamp) - _elapsedTime = segmentElapsedTime - - finishSegment(name, category, library, segmentElapsedTime, _tags.toMap, withError) - } - - def finishWithError(cause: Throwable): Unit = { - // We should do something with the Throwable in the near future. - finish(withError = true) - } - - def finish(): Unit = finish(withError = false) - - // Handle with care and make sure that the segment is closed before calling this method, otherwise - // NanoInterval.default will be returned. - def elapsedTime: NanoInterval = _elapsedTime - def startTimestamp: RelativeNanoTimestamp = _startTimestamp - - } -} - -case class SegmentLatencyData( - name: String, - category: String, - library: String, - duration: NanoInterval, - tags: Map[String, String], - isFinishedWithError: Boolean -) \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/Sampler.scala b/kamon-core/src/main/scala/kamon/trace/Sampler.scala deleted file mode 100644 index 234c67bb..00000000 --- a/kamon-core/src/main/scala/kamon/trace/Sampler.scala +++ /dev/null @@ -1,101 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import java.net.InetAddress -import java.util.concurrent.atomic.AtomicLong -import kamon.util.{NanoTimestamp, NanoInterval, Sequencer} -import scala.concurrent.forkjoin.ThreadLocalRandom - -import scala.util.Try - -trait Sampler { - def shouldTrace: Boolean - def shouldReport(traceElapsedTime: NanoInterval): Boolean -} - -object NoSampling extends Sampler { - def shouldTrace: Boolean = false - def shouldReport(traceElapsedTime: NanoInterval): Boolean = false -} - -object SampleAll extends Sampler { - def shouldTrace: Boolean = true - def shouldReport(traceElapsedTime: NanoInterval): Boolean = true -} - -class RandomSampler(chance: Int) extends Sampler { - require(chance > 0, "kamon.trace.random-sampler.chance cannot be <= 0") - require(chance <= 100, "kamon.trace.random-sampler.chance cannot be > 100") - - def shouldTrace: Boolean = ThreadLocalRandom.current().nextInt(100) < chance - def shouldReport(traceElapsedTime: NanoInterval): Boolean = true -} - -class OrderedSampler(interval: Int) extends Sampler { - import OrderedSampler._ - - require(interval > 0, "kamon.trace.ordered-sampler.sample-interval cannot be <= 0") - assume(interval isPowerOfTwo, "kamon.trace.ordered-sampler.sample-interval must be a power of two") - - private val sequencer = Sequencer() - - def shouldTrace: Boolean = (sequencer.next() fastMod interval) == 0 - def shouldReport(traceElapsedTime: NanoInterval): Boolean = true -} - -object OrderedSampler { - implicit class EnhancedInt(i: Int) { - def isPowerOfTwo = (i & (i - 1)) == 0 - } - - implicit class EnhancedLong(dividend: Long) { - def fastMod(divisor: Int) = dividend & (divisor - 1) - } -} - -class ThresholdSampler(thresholdInNanoseconds: NanoInterval) extends Sampler { - require(thresholdInNanoseconds.nanos > 0, "kamon.trace.threshold-sampler.minimum-elapsed-time cannot be <= 0") - - def shouldTrace: Boolean = true - def shouldReport(traceElapsedTime: NanoInterval): Boolean = traceElapsedTime >= thresholdInNanoseconds -} - -class ClockSampler(pauseInNanoseconds: NanoInterval) extends Sampler { - require(pauseInNanoseconds.nanos > 0, "kamon.trace.clock-sampler.pause cannot be <= 0") - - private val timer: AtomicLong = new AtomicLong(0L) - - def shouldTrace: Boolean = { - val now = NanoTimestamp.now.nanos - val lastTimer = timer.get() - if ((lastTimer + pauseInNanoseconds.nanos) < now) - timer.compareAndSet(lastTimer, now) - else - false - } - def shouldReport(traceElapsedTime: NanoInterval): Boolean = true -} - -class DefaultTokenGenerator extends Function0[String] { - private val _hostnamePrefix = Try(InetAddress.getLocalHost.getHostName).getOrElse("unknown-localhost") - private val _tokenCounter = new AtomicLong - - def apply(): String = { - _hostnamePrefix + "-" + String.valueOf(_tokenCounter.incrementAndGet()) - } -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/TraceContext.scala b/kamon-core/src/main/scala/kamon/trace/TraceContext.scala deleted file mode 100644 index bbf40d8d..00000000 ---
a/kamon-core/src/main/scala/kamon/trace/TraceContext.scala +++ /dev/null @@ -1,202 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import java.io.ObjectStreamException -import java.util - -import kamon.trace.Status.Closed -import kamon.trace.TraceContextAware.DefaultTraceContextAware -import kamon.util.{Function, RelativeNanoTimestamp, SameThreadExecutionContext, Supplier} - -import scala.concurrent.Future - -trait TraceContext { - def name: String - def token: String - def tags: Map[String, String] - def isEmpty: Boolean - def nonEmpty: Boolean = !isEmpty - def isClosed: Boolean = !(Status.Open == status) - def status: Status - def finish(): Unit - def finishWithError(cause: Throwable): Unit - def rename(newName: String): Unit - def startSegment(segmentName: String, category: String, library: String): Segment - def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment - def addMetadata(key: String, value: String): Unit - def addTag(key: String, value: String): Unit - def removeTag(key: String, value: String): Boolean - def startTimestamp: RelativeNanoTimestamp - - def collect[T](f: TraceContext ⇒ T): Option[T] = - if (nonEmpty) - Some(f(this)) - else None - - def collect[T](f: Function[TraceContext, T]): Option[T] = - if (nonEmpty) - Some(f(this)) - else None - - def withNewSegment[T](segmentName: String, category: String, library: String)(code: ⇒ T): T = { - withNewSegment(segmentName, category, library, Map.empty[String, String])(code) - } - - def withNewSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ T): T = { - val segment = startSegment(segmentName, category, library, tags) - try code finally segment.finish() - } - - def withNewAsyncSegment[T](segmentName: String, category: String, library: String)(code: ⇒ Future[T]): Future[T] = { - withNewAsyncSegment(segmentName, category, library, Map.empty[String, String])(code) - } - - def withNewAsyncSegment[T](segmentName: String, category: String, library: String, tags: Map[String, String])(code: ⇒ Future[T]): Future[T] = { - val segment = startSegment(segmentName, category, library, tags) - val result = code - result.onComplete(_ ⇒ segment.finish())(SameThreadExecutionContext) - result - } - - // Java variant. 
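// A Scala usage sketch of the helpers above, before the Java variants that follow (`runQuery` is a
// hypothetical placeholder):
//   Tracer.currentContext.withNewSegment("fetch-users", SegmentCategory.Database, "jdbc") {
//     runQuery()
//   }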
- def withNewSegment[T](segmentName: String, category: String, library: String, code: Supplier[T]): T = - withNewSegment(segmentName, category, library)(code.get) - - def withNewSegment[T](segmentName: String, category: String, library: String, tags: util.Map[String, String], code: Supplier[T]): T = { - import scala.collection.JavaConverters._ - withNewSegment(segmentName, category, library, tags.asScala.toMap)(code.get) - } -} - -trait Segment { - def name: String - def category: String - def library: String - def tags: Map[String, String] - def isEmpty: Boolean - def nonEmpty: Boolean = !isEmpty - def isClosed: Boolean = !(Status.Open == status) - def status: Status - def finish(): Unit - def finishWithError(cause: Throwable): Unit - def rename(newName: String): Unit - def addMetadata(key: String, value: String): Unit - def addTag(key: String, value: String): Unit - def removeTag(key: String, value: String): Boolean -} - -case object EmptyTraceContext extends TraceContext { - def name: String = "empty-trace" - def token: String = "" - def tags: Map[String, String] = Map.empty - def isEmpty: Boolean = true - def status: Status = Closed - def finish(): Unit = {} - def finishWithError(cause: Throwable): Unit = {} - def rename(name: String): Unit = {} - def startSegment(segmentName: String, category: String, library: String): Segment = EmptySegment - def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = EmptySegment - def addMetadata(key: String, value: String): Unit = {} - def startTimestamp = new RelativeNanoTimestamp(0L) - def addTag(key: String, value: String): Unit = {} - def removeTag(key: String, value: String): Boolean = false - - case object EmptySegment extends Segment { - val name: String = "empty-segment" - val category: String = "empty-category" - val library: String = "empty-library" - def tags: Map[String, String] = Map.empty - def isEmpty: Boolean = true - def status: Status = Closed - def finish(): Unit = {} - def finishWithError(cause: Throwable): Unit = {} - def rename(newName: String): Unit = {} - def addMetadata(key: String, value: String): Unit = {} - def addTag(key: String, value: String): Unit = {} - def removeTag(key: String, value: String): Boolean = false - } -} - -object SegmentCategory { - val HttpClient = "http-client" - val Database = "database" -} - -class LOD private[trace] (val level: Int) extends AnyVal -object LOD { - val MetricsOnly = new LOD(1) - val SimpleTrace = new LOD(2) -} - -sealed trait LevelOfDetail -object LevelOfDetail { - case object MetricsOnly extends LevelOfDetail - case object SimpleTrace extends LevelOfDetail - case object FullTrace extends LevelOfDetail -} - -sealed trait Status -object Status { - case object Open extends Status - case object Closed extends Status - case object FinishedWithError extends Status - case object FinishedSuccessfully extends Status -} - -trait TraceContextAware extends Serializable { - def traceContext: TraceContext -} - -object TraceContextAware { - def default: TraceContextAware = new DefaultTraceContextAware - - class DefaultTraceContextAware extends TraceContextAware { - @transient val traceContext = Tracer.currentContext - - // - // Beware of this hack, it might bite us in the future! - // - // When using remoting/cluster all messages carry the TraceContext in the envelope in which they - // are sent but that doesn't apply to System Messages. 
We are certain that the TraceContext is - // available (if any) when the system messages are read and this will make sure that it is correctly - // captured and propagated. - @throws[ObjectStreamException] - private def readResolve: AnyRef = { - new DefaultTraceContextAware - } - } -} - -trait TimestampedTraceContextAware extends TraceContextAware { - def captureNanoTime: Long -} - -object TimestampedTraceContextAware { - def default: TimestampedTraceContextAware = new DefaultTraceContextAware with TimestampedTraceContextAware { - @transient val captureNanoTime = System.nanoTime() - } -} - -trait SegmentAware { - @volatile @transient var segment: Segment = EmptyTraceContext.EmptySegment -} - -object SegmentAware { - def default: SegmentAware = new DefaultSegmentAware - class DefaultSegmentAware extends DefaultTraceContextAware with SegmentAware {} -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/TraceLocal.scala b/kamon-core/src/main/scala/kamon/trace/TraceLocal.scala deleted file mode 100644 index 460e4b22..00000000 --- a/kamon-core/src/main/scala/kamon/trace/TraceLocal.scala +++ /dev/null @@ -1,64 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import kamon.trace.TraceLocal.TraceLocalKey -import kamon.util.Supplier -import scala.collection.concurrent.TrieMap - -object TraceLocal { - - trait TraceLocalKey[T] - - trait AvailableToMdc extends TraceLocalKey[String] { - def mdcKey: String - } - - object AvailableToMdc { - case class DefaultKeyAvailableToMdc(mdcKey: String) extends AvailableToMdc - - def fromKey(mdcKey: String): AvailableToMdc = DefaultKeyAvailableToMdc(mdcKey) - def apply(mdcKey: String): AvailableToMdc = fromKey(mdcKey) - } - - def store[T](key: TraceLocalKey[T])(value: T): Unit = Tracer.currentContext match { - case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.store(key)(value) - case EmptyTraceContext ⇒ // Can't store in the empty context. - } - - def retrieve[T](key: TraceLocalKey[T]): Option[T] = Tracer.currentContext match { - case ctx: MetricsOnlyContext ⇒ ctx.traceLocalStorage.retrieve(key) - case EmptyTraceContext ⇒ None // Can't retrieve anything from the empty context.
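// A usage sketch (`RequestId` is a hypothetical key built with `newTraceLocalKey` below):
//   val RequestId = TraceLocal.newTraceLocalKey[String]
//   TraceLocal.store(RequestId)("abc-123")   // silently ignored outside of an active trace
//   TraceLocal.retrieve(RequestId)           // Some("abc-123") within the same trace, None otherwise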
- } - - // Java variant - @throws(classOf[NoSuchElementException]) - def get[T](key: TraceLocalKey[T]): T = retrieve(key).get - - def getOrElse[T](key: TraceLocalKey[T], code: Supplier[T]): T = retrieve(key).getOrElse(code.get) - - def storeForMdc(key: String, value: String): Unit = store(AvailableToMdc.fromKey(key))(value) - - def newTraceLocalKey[T]: TraceLocalKey[T] = new TraceLocalKey[T] {} -} - -class TraceLocalStorage { - val underlyingStorage = TrieMap[TraceLocalKey[_], Any]() - - def store[T](key: TraceLocalKey[T])(value: T): Unit = underlyingStorage.put(key, value) - def retrieve[T](key: TraceLocalKey[T]): Option[T] = underlyingStorage.get(key).map(_.asInstanceOf[T]) -} diff --git a/kamon-core/src/main/scala/kamon/trace/TraceSettings.scala b/kamon-core/src/main/scala/kamon/trace/TraceSettings.scala deleted file mode 100644 index c3a83e93..00000000 --- a/kamon-core/src/main/scala/kamon/trace/TraceSettings.scala +++ /dev/null @@ -1,51 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import kamon.util.ConfigTools.Syntax -import com.typesafe.config.Config -import kamon.util.NanoInterval - -case class TraceSettings(levelOfDetail: LevelOfDetail, sampler: Sampler, tokenGenerator: () ⇒ String) - -object TraceSettings { - def apply(config: Config): TraceSettings = { - val tracerConfig = config.getConfig("kamon.trace") - - val detailLevel: LevelOfDetail = tracerConfig.getString("level-of-detail") match { - case "metrics-only" ⇒ LevelOfDetail.MetricsOnly - case "simple-trace" ⇒ LevelOfDetail.SimpleTrace - case other ⇒ sys.error(s"Unknown tracer level of detail [$other] present in the configuration file.") - } - - val sampler: Sampler = - if (detailLevel == LevelOfDetail.MetricsOnly) NoSampling - else tracerConfig.getString("sampling") match { - case "all" ⇒ SampleAll - case "random" ⇒ new RandomSampler(tracerConfig.getInt("random-sampler.chance")) - case "ordered" ⇒ new OrderedSampler(tracerConfig.getInt("ordered-sampler.sample-interval")) - case "threshold" ⇒ new ThresholdSampler(new NanoInterval(tracerConfig.getFiniteDuration("threshold-sampler.minimum-elapsed-time").toNanos)) - case "clock" ⇒ new ClockSampler(new NanoInterval(tracerConfig.getFiniteDuration("clock-sampler.pause").toNanos)) - } - - val dynamic = new akka.actor.ReflectiveDynamicAccess(getClass.getClassLoader) - val tokenGeneratorFQN = tracerConfig.getString("token-generator") - val tokenGenerator = dynamic.createInstanceFor[() ⇒ String](tokenGeneratorFQN, Nil).get // let's bubble up any problems.
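// For reference, a minimal configuration sketch matching the keys read above (all values illustrative):
//   kamon.trace {
//     level-of-detail = simple-trace
//     sampling = random
//     random-sampler.chance = 10
//     token-generator = kamon.trace.DefaultTokenGenerator
//   }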
- - TraceSettings(detailLevel, sampler, tokenGenerator) - } -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/TraceSubscriptions.scala b/kamon-core/src/main/scala/kamon/trace/TraceSubscriptions.scala deleted file mode 100644 index 47455ec5..00000000 --- a/kamon-core/src/main/scala/kamon/trace/TraceSubscriptions.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import akka.actor.{Terminated, ActorRef, Actor} - -class TraceSubscriptions extends Actor { - import TraceSubscriptions._ - - var subscribers: List[ActorRef] = Nil - - def receive = { - case Subscribe(newSubscriber) ⇒ - if (!subscribers.contains(newSubscriber)) - subscribers = context.watch(newSubscriber) :: subscribers - - case Unsubscribe(leavingSubscriber) ⇒ - subscribers = subscribers.filterNot(_ == leavingSubscriber) - - case Terminated(terminatedSubscriber) ⇒ - subscribers = subscribers.filterNot(_ == terminatedSubscriber) - - case trace: TraceInfo ⇒ - subscribers.foreach(_ ! trace) - } -} - -object TraceSubscriptions { - case class Subscribe(subscriber: ActorRef) - case class Unsubscribe(subscriber: ActorRef) -} diff --git a/kamon-core/src/main/scala/kamon/trace/TracerModule.scala b/kamon-core/src/main/scala/kamon/trace/TracerModule.scala deleted file mode 100644 index 552962eb..00000000 --- a/kamon-core/src/main/scala/kamon/trace/TracerModule.scala +++ /dev/null @@ -1,197 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License.
- * ========================================================================================= - */ - -package kamon.trace - -import java.util - -import akka.actor._ -import akka.event.{Logging, LoggingAdapter} -import com.typesafe.config.Config -import kamon.Kamon -import kamon.metric.MetricsModule -import kamon.util._ - -import scala.collection.JavaConverters._ - -trait TracerModule { - def newContext(name: String): TraceContext - def newContext(name: String, token: Option[String]): TraceContext - def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext - def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, state: Status, isLocal: Boolean): TraceContext - - def subscribe(subscriber: ActorRef): Unit - def unsubscribe(subscriber: ActorRef): Unit -} - -object Tracer { - private[kamon] val _traceContextStorage = new ThreadLocal[TraceContext] { - override def initialValue(): TraceContext = EmptyTraceContext - } - - def currentContext: TraceContext = - _traceContextStorage.get() - - def setCurrentContext(context: TraceContext): Unit = - _traceContextStorage.set(context) - - def clearCurrentContext: Unit = - _traceContextStorage.remove() - - def withContext[T](context: TraceContext)(code: ⇒ T): T = { - val oldContext = _traceContextStorage.get() - _traceContextStorage.set(context) - - try code finally _traceContextStorage.set(oldContext) - } - - // Java variant. - def withContext[T](context: TraceContext, code: Supplier[T]): T = - withContext(context)(code.get) - - def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T = { - withContext(Kamon.tracer.newContext(traceName, traceToken, tags)) { - val codeResult = code - if (autoFinish) - currentContext.finish() - - codeResult - } - } - - def withNewContext[T](traceName: String)(code: ⇒ T): T = - withNewContext(traceName, None)(code) - - def withNewContext[T](traceName: String, tags: Map[String, String])(code: ⇒ T): T = - withNewContext(traceName, None, tags)(code) - - def withNewContext[T](traceName: String, traceToken: Option[String])(code: ⇒ T): T = - withNewContext(traceName, traceToken, Map.empty[String, String])(code) - - def withNewContext[T](traceName: String, traceToken: Option[String], tags: Map[String, String])(code: ⇒ T): T = - withNewContext(traceName, traceToken, tags, autoFinish = false)(code) - - def withNewContext[T](traceName: String, autoFinish: Boolean)(code: ⇒ T): T = - withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code) - - def withNewContext[T](traceName: String, tags: Map[String, String], autoFinish: Boolean)(code: ⇒ T): T = - withNewContext(traceName, None, tags, autoFinish)(code) - - // Java variants. 
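// A sketch of calling one of the Java variants below from Java 8 (`userService` is hypothetical):
//   Tracer.withNewContext("get-user", true, () -> userService.getUser(42));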
- def withNewContext[T](traceName: String, traceToken: Option[String], autoFinish: Boolean, code: Supplier[T]): T = - withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish)(code.get) - - def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T = - withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish)(code.get) - - def withNewContext[T](traceName: String, code: Supplier[T]): T = - withNewContext(traceName, None, Map.empty[String, String], autoFinish = false)(code.get) - - def withNewContext[T](traceName: String, tags: util.Map[String, String], code: Supplier[T]): T = - withNewContext(traceName, None, tags.asScala.toMap, autoFinish = false)(code.get) - - def withNewContext[T](traceName: String, traceToken: Option[String], code: Supplier[T]): T = - withNewContext(traceName, traceToken, Map.empty[String, String], autoFinish = false)(code.get) - - def withNewContext[T](traceName: String, traceToken: Option[String], tags: util.Map[String, String], code: Supplier[T]): T = - withNewContext(traceName, traceToken, tags.asScala.toMap, autoFinish = false)(code.get) - - def withNewContext[T](traceName: String, autoFinish: Boolean, code: Supplier[T]): T = - withNewContext(traceName, None, Map.empty[String, String], autoFinish)(code.get) - - def withNewContext[T](traceName: String, tags: util.Map[String, String], autoFinish: Boolean, code: Supplier[T]): T = - withNewContext(traceName, None, tags.asScala.toMap, autoFinish)(code.get) -} - -private[kamon] class TracerModuleImpl(metricsExtension: MetricsModule, config: Config) extends TracerModule { - @volatile private var _settings = TraceSettings(config) - - private val _subscriptions = new LazyActorRef - private val _incubator = new LazyActorRef - - private def newToken: String = _settings.tokenGenerator() - - def newContext(name: String): TraceContext = - createTraceContext(name, None) - - def newContext(name: String, token: Option[String]): TraceContext = - createTraceContext(name, token) - - def newContext(name: String, token: Option[String], tags: Map[String, String]): TraceContext = - createTraceContext(name, token, tags) - - def newContext(name: String, token: Option[String], tags: Map[String, String], timestamp: RelativeNanoTimestamp, status: Status, isLocal: Boolean): TraceContext = - createTraceContext(name, token, tags, timestamp, status, isLocal) - - private def createTraceContext(traceName: String, token: Option[String], tags: Map[String, String] = Map.empty, startTimestamp: RelativeNanoTimestamp = RelativeNanoTimestamp.now, - status: Status = Status.Open, isLocal: Boolean = true): TraceContext = { - - def newMetricsOnlyContext(token: String): TraceContext = - new MetricsOnlyContext(traceName, token, tags, status, _settings.levelOfDetail, startTimestamp, _logger) - - val traceToken = token.getOrElse(newToken) - - _settings.levelOfDetail match { - case LevelOfDetail.MetricsOnly ⇒ - newMetricsOnlyContext(traceToken) - case _ if !isLocal || !_settings.sampler.shouldTrace ⇒ - newMetricsOnlyContext(traceToken) - case _ ⇒ - new TracingContext(traceName, traceToken, tags, currentStatus = Status.Open, _settings.levelOfDetail, isLocal, startTimestamp, _logger, dispatchTracingContext) - } - } - - def subscribe(subscriber: ActorRef): Unit = - _subscriptions.tell(TraceSubscriptions.Subscribe(subscriber)) - - def unsubscribe(subscriber: ActorRef): Unit = - _subscriptions.tell(TraceSubscriptions.Unsubscribe(subscriber)) - - 
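// A subscription sketch (`TraceInfoLogger` is a hypothetical actor that would receive the TraceInfo
// messages dispatched below):
//   Kamon.tracer.subscribe(system.actorOf(Props[TraceInfoLogger]))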
private[kamon] def dispatchTracingContext(trace: TracingContext): Unit = - if (_settings.sampler.shouldReport(trace.elapsedTime)) - if (trace.shouldIncubate) - _incubator.tell(trace) - else - _subscriptions.tell(trace.generateTraceInfo) - - /** - * Tracer Extension initialization. - */ - private var _system: ActorSystem = null - private var _logger: LoggingAdapter = null - private lazy val _start = { - val subscriptions = _system.actorOf(Props[TraceSubscriptions], "trace-subscriptions") - _subscriptions.point(subscriptions) - _incubator.point(_system.actorOf(Incubator.props(subscriptions))) - } - - def start(system: ActorSystem, newConfig: Config): Unit = synchronized { - _settings = TraceSettings(newConfig) - _system = system - _logger = Logging(_system, "TracerModule") - _start - _system = null - } -} - -private[kamon] object TracerModuleImpl { - - def apply(metricsExtension: MetricsModule, config: Config) = - new TracerModuleImpl(metricsExtension, config) -} - -case class TraceInfo(name: String, token: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], segments: List[SegmentInfo], status: Status) -case class SegmentInfo(name: String, category: String, library: String, timestamp: NanoTimestamp, elapsedTime: NanoInterval, metadata: Map[String, String], tags: Map[String, String], status: Status) \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/TracingContext.scala b/kamon-core/src/main/scala/kamon/trace/TracingContext.scala deleted file mode 100644 index ff128f85..00000000 --- a/kamon-core/src/main/scala/kamon/trace/TracingContext.scala +++ /dev/null @@ -1,113 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.trace - -import java.util.concurrent.ConcurrentLinkedQueue -import java.util.concurrent.atomic.AtomicInteger - -import akka.event.LoggingAdapter -import kamon.util.{NanoInterval, NanoTimestamp, RelativeNanoTimestamp} - -import scala.collection.concurrent.TrieMap - -private[trace] class TracingContext( - traceName: String, - token: String, - traceTags: Map[String, String], - currentStatus: Status, - levelOfDetail: LevelOfDetail, - isLocal: Boolean, - startTimestamp: RelativeNanoTimestamp, - log: LoggingAdapter, - traceInfoSink: TracingContext ⇒ Unit -) extends MetricsOnlyContext(traceName, token, traceTags, currentStatus, levelOfDetail, startTimestamp, log) { - - private val _openSegments = new AtomicInteger(0) - private val _startTimestamp = NanoTimestamp.now - private val _allSegments = new ConcurrentLinkedQueue[TracingSegment]() - private val _metadata = TrieMap.empty[String, String] - - override def addMetadata(key: String, value: String): Unit = _metadata.put(key, value) - - override def startSegment(segmentName: String, category: String, library: String): Segment = { - startSegment(segmentName, category, library, Map.empty[String, String]) - } - - override def startSegment(segmentName: String, category: String, library: String, tags: Map[String, String]): Segment = { - _openSegments.incrementAndGet() - val newSegment = new TracingSegment(segmentName, category, library, tags) - _allSegments.add(newSegment) - newSegment - } - - override def finish(): Unit = { - super.finish() - traceInfoSink(this) - } - - override def finishWithError(cause: Throwable): Unit = { - super.finishWithError(cause) - traceInfoSink(this) - } - - override def finishSegment(segmentName: String, category: String, library: String, duration: NanoInterval, tags: Map[String, String], isFinishedWithError: Boolean = false): Unit = { - _openSegments.decrementAndGet() - super.finishSegment(segmentName, category, library, duration, tags, isFinishedWithError) - } - - def shouldIncubate: Boolean = (Status.Open == status) || _openSegments.get() > 0 - - // Handle with care, should only be used after a trace is finished. - def generateTraceInfo: TraceInfo = { - require(isClosed, "Can't generate a TraceInfo if the Trace has not been closed yet.") - - val currentSegments = _allSegments.iterator() - val segmentsInfo = List.newBuilder[SegmentInfo] - - while (currentSegments.hasNext) { - val segment = currentSegments.next() - if (segment.isClosed) - segmentsInfo += segment.createSegmentInfo(_startTimestamp, startTimestamp) - else - log.warning("Segment [{}] will be left out of TraceInfo because it was still open.", segment.name) - } - - TraceInfo(name, token, _startTimestamp, elapsedTime, _metadata.toMap, tags, segmentsInfo.result(), status) - } - - class TracingSegment( - segmentName: String, - category: String, - library: String, - segmentTags: Map[String, String] - ) extends MetricsOnlySegment(segmentName, category, library, segmentTags) { - - private val metadata = TrieMap.empty[String, String] - override def addMetadata(key: String, value: String): Unit = metadata.put(key, value) - - // Handle with care, should only be used after the segment has finished.
- def createSegmentInfo(traceStartTimestamp: NanoTimestamp, traceRelativeTimestamp: RelativeNanoTimestamp): SegmentInfo = { - require(isClosed, "Can't generate a SegmentInfo if the Segment has not been closed yet.") - - // We don't have an epoch-based timestamp for the segments because calling System.currentTimeMillis() is both - // expensive and inaccurate, but we can do that once for the trace and calculate all the segments relative to it. - val segmentStartTimestamp = new NanoTimestamp((this.startTimestamp.nanos - traceRelativeTimestamp.nanos) + traceStartTimestamp.nanos) - - SegmentInfo(this.name, category, library, segmentStartTimestamp, this.elapsedTime, metadata.toMap, tags, status) - } - } -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala b/kamon-core/src/main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala deleted file mode 100644 index 8177ed14..00000000 --- a/kamon-core/src/main/scala/kamon/trace/logging/LogbackTraceTokenConverter.scala +++ /dev/null @@ -1,26 +0,0 @@ -/* =================================================== - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * ========================================================== */ -package kamon.trace.logging - -import ch.qos.logback.classic.pattern.ClassicConverter -import ch.qos.logback.classic.spi.ILoggingEvent -import kamon.trace.Tracer - -class LogbackTraceTokenConverter extends ClassicConverter { - - def convert(event: ILoggingEvent): String = - Tracer.currentContext.collect(_.token).getOrElse("undefined") -} diff --git a/kamon-core/src/main/scala/kamon/trace/logging/MdcKeysSupport.scala b/kamon-core/src/main/scala/kamon/trace/logging/MdcKeysSupport.scala deleted file mode 100644 index 556366b0..00000000 --- a/kamon-core/src/main/scala/kamon/trace/logging/MdcKeysSupport.scala +++ /dev/null @@ -1,54 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License.
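[A worked check of the reconstruction done in `createSegmentInfo` above, with invented values: the epoch timestamp is captured once per trace, and both start points are monotonic System.nanoTime readings:

    object SegmentTimestampCheck extends App {
      val traceEpochStartNanos = 1000000000000L // NanoTimestamp taken once per trace
      val traceMonotonicStart = 500L            // System.nanoTime at trace start
      val segmentMonotonicStart = 800L          // System.nanoTime at segment start

      // offset of the segment into the trace, shifted onto the trace's epoch start
      val segmentEpochStart = (segmentMonotonicStart - traceMonotonicStart) + traceEpochStartNanos
      assert(segmentEpochStart == 1000000000300L)
    }
]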
- * ========================================================================================= - */ - -package kamon.trace.logging - -import kamon.trace.TraceLocal.AvailableToMdc -import kamon.trace.{EmptyTraceContext, MetricsOnlyContext, TraceContext, Tracer} -import kamon.util.Supplier -import org.slf4j.MDC - -trait MdcKeysSupport { - - val traceTokenKey = "traceToken" - val traceNameKey = "traceName" - - private val defaultKeys = Seq(traceTokenKey, traceNameKey) - - def withMdc[A](thunk: ⇒ A): A = { - val keys = copyToMdc(Tracer.currentContext) - try thunk finally keys.foreach(key ⇒ MDC.remove(key)) - } - - // Java variant. - def withMdc[A](thunk: Supplier[A]): A = withMdc(thunk.get) - - private[kamon] def copyToMdc(traceContext: TraceContext): Iterable[String] = traceContext match { - case ctx: MetricsOnlyContext ⇒ - - // Add the default key value pairs for the trace token and trace name. - MDC.put(traceTokenKey, ctx.token) - MDC.put(traceNameKey, ctx.name) - - defaultKeys ++ ctx.traceLocalStorage.underlyingStorage.collect { - case (available: AvailableToMdc, value) ⇒ Map(available.mdcKey → String.valueOf(value)) - }.flatMap { value ⇒ value.map { case (k, v) ⇒ MDC.put(k, v); k } } - - case EmptyTraceContext ⇒ Iterable.empty[String] - } -} - -object MdcKeysSupport extends MdcKeysSupport \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/util/ConfigTools.scala b/kamon-core/src/main/scala/kamon/util/ConfigTools.scala deleted file mode 100644 index cbf530e4..00000000 --- a/kamon-core/src/main/scala/kamon/util/ConfigTools.scala +++ /dev/null @@ -1,48 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -import java.util.concurrent.TimeUnit - -import com.typesafe.config.Config - -import scala.concurrent.duration.FiniteDuration - -import kamon.metric.instrument.{Memory, Time} - -object ConfigTools { - implicit class Syntax(val config: Config) extends AnyVal { - // We are using the deprecated .getNanoseconds option to keep Kamon source code compatible with - // versions of Akka using older typesafe-config versions. 
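[Usage sketch for the `MdcKeysSupport` trait deleted above: wrapping a logging statement in `withMdc` makes `%X{traceToken}` and `%X{traceName}` resolve in logback patterns. The logger name is illustrative:

    import kamon.trace.logging.MdcKeysSupport
    import org.slf4j.LoggerFactory

    object MdcUsageSketch extends App {
      val log = LoggerFactory.getLogger("sample")
      MdcKeysSupport.withMdc {
        // Inside the block the current TraceContext's token and name sit in the MDC.
        log.info("this statement carries traceToken and traceName")
      }
    }
]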
- - def getFiniteDuration(path: String): FiniteDuration = - FiniteDuration(config.getDuration(path, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS) - - def firstLevelKeys: Set[String] = { - import scala.collection.JavaConverters._ - - config.entrySet().asScala.map { - case entry ⇒ entry.getKey.takeWhile(_ != '.') - }.toSet - } - - def time(path: String): Time = Time(config.getString(path)) - - def memory(path: String): Memory = Memory(config.getString(path)) - } - -} diff --git a/kamon-core/src/main/scala/kamon/util/FastDispatch.scala b/kamon-core/src/main/scala/kamon/util/FastDispatch.scala deleted file mode 100644 index 7a94aefc..00000000 --- a/kamon-core/src/main/scala/kamon/util/FastDispatch.scala +++ /dev/null @@ -1,38 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -import akka.actor.ActorRef - -import scala.concurrent.{ExecutionContext, Future} - -/** - * Extension for Future[ActorRef]. Tries to dispatch a message to a Future[ActorRef] on the same thread if it has - * already completed, or falls back to regular scheduling otherwise. Especially useful when using the ModuleSupervisor - * extension to create actors. - */ -object FastDispatch { - implicit class Syntax(val target: Future[ActorRef]) extends AnyVal { - - def fastDispatch(message: Any)(implicit ec: ExecutionContext): Unit = - if (target.isCompleted) - target.value.get.map(_ ! message) - else - target.map(_ ! message) - } - -} diff --git a/kamon-core/src/main/scala/kamon/util/FunctionalInterfaces.scala b/kamon-core/src/main/scala/kamon/util/FunctionalInterfaces.scala deleted file mode 100644 index 8dab6519..00000000 --- a/kamon-core/src/main/scala/kamon/util/FunctionalInterfaces.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License.
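[Usage sketch for the `ConfigTools` syntax above; the config paths are invented for illustration:

    import com.typesafe.config.ConfigFactory
    import kamon.util.ConfigTools.Syntax

    object ConfigToolsSketch extends App {
      val config = ConfigFactory.parseString(
        """
          |tick-interval = 10 seconds
          |filters {
          |  trace.includes = []
          |  histogram.includes = []
          |}
        """.stripMargin)

      println(config.getFiniteDuration("tick-interval"))  // 10000000000 nanoseconds
      println(config.getConfig("filters").firstLevelKeys) // Set(trace, histogram)
    }
]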
- * ========================================================================================= - */ - -package kamon.util - -trait Supplier[T] { - def get: T -} - -trait Function[T, R] { - def apply(t: T): R -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/util/JavaTags.scala b/kamon-core/src/main/scala/kamon/util/JavaTags.scala deleted file mode 100644 index 90bece5c..00000000 --- a/kamon-core/src/main/scala/kamon/util/JavaTags.scala +++ /dev/null @@ -1,14 +0,0 @@ -package kamon.util - -object JavaTags { - - /** - * Helper method to transform Java maps into Scala maps. Typically this will be used as a static import from - * Java code that wants to use tags, as tags are defined as scala.collection.immutable.Map[String, String] and - * creating them directly from Java is quite verbose. - */ - def tagsFromMap(tags: java.util.Map[String, String]): Map[String, String] = { - import scala.collection.JavaConversions._ - tags.toMap - } -} diff --git a/kamon-core/src/main/scala/kamon/util/Latency.scala b/kamon-core/src/main/scala/kamon/util/Latency.scala deleted file mode 100644 index 52e044f8..00000000 --- a/kamon-core/src/main/scala/kamon/util/Latency.scala +++ /dev/null @@ -1,29 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -import kamon.metric.instrument.Histogram - -object Latency { - def measure[A](histogram: Histogram)(thunk: ⇒ A): A = { - val start = RelativeNanoTimestamp.now - try thunk finally { - val latency = NanoInterval.since(start).nanos - histogram.record(latency) - } - } -} diff --git a/kamon-core/src/main/scala/kamon/util/LazyActorRef.scala b/kamon-core/src/main/scala/kamon/util/LazyActorRef.scala deleted file mode 100644 index a07abea6..00000000 --- a/kamon-core/src/main/scala/kamon/util/LazyActorRef.scala +++ /dev/null @@ -1,69 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License.
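[Usage sketch for `Latency.measure` above: the elapsed nanoseconds of the thunk are recorded into the histogram even when the thunk throws. The metric name is invented:

    import kamon.Kamon
    import kamon.util.Latency

    object LatencySketch extends App {
      Kamon.start()
      val histogram = Kamon.metrics.histogram("db-call-latency")

      val result = Latency.measure(histogram) {
        Thread.sleep(5) // stand-in for real work
        42
      }
      println(result) // 42; the ~5ms latency was recorded in nanoseconds
      Kamon.shutdown()
    }
]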
- * ========================================================================================= - */ - -package kamon.util - -import java.util -import java.util.concurrent.ConcurrentLinkedQueue - -import akka.actor.{Actor, ActorRef} -import org.HdrHistogram.WriterReaderPhaser - -import scala.annotation.tailrec - -/** - * A LazyActorRef accumulates messages sent to an actor that doesn't exist yet. Once the actor is created and - * the LazyActorRef is pointed to it, all the accumulated messages are flushed and any new message sent to the - * LazyActorRef will immediately be sent to the pointed ActorRef. - * - * This is intended to be used during Kamon's initialization where some components need to use ActorRefs to work - * (like subscriptions and the trace incubator) but our internal ActorSystem is not yet ready to create the - * required actors. - */ -class LazyActorRef { - private val _refPhaser = new WriterReaderPhaser - private val _backlog = new ConcurrentLinkedQueue[(Any, ActorRef)]() - @volatile private var _target: Option[ActorRef] = None - - def tell(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { - val criticalEnter = _refPhaser.writerCriticalSectionEnter() - try { - _target.map(_.tell(message, sender)) getOrElse { - _backlog.add((message, sender)) - } - - } finally { _refPhaser.writerCriticalSectionExit(criticalEnter) } - } - - def point(target: ActorRef): Unit = { - @tailrec def drain(q: util.Queue[(Any, ActorRef)]): Unit = if (!q.isEmpty) { - val (msg, sender) = q.poll() - target.tell(msg, sender) - drain(q) - } - - try { - _refPhaser.readerLock() - - if (_target.isEmpty) { - _target = Some(target) - _refPhaser.flipPhase(1000L) - drain(_backlog) - - } else sys.error("A LazyActorRef cannot be pointed more than once.") - } finally { _refPhaser.readerUnlock() } - } -} diff --git a/kamon-core/src/main/scala/kamon/util/MapMerge.scala b/kamon-core/src/main/scala/kamon/util/MapMerge.scala deleted file mode 100644 index 6fc6fb15..00000000 --- a/kamon-core/src/main/scala/kamon/util/MapMerge.scala +++ /dev/null @@ -1,43 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -object MapMerge { - - /** - * Merge two immutable maps with the same key and value types, using the provided valueMerge function. - */ - implicit class Syntax[K, V](val map: Map[K, V]) extends AnyVal { - def merge(that: Map[K, V], valueMerge: (V, V) ⇒ V): Map[K, V] = { - val merged = Map.newBuilder[K, V] - - map.foreach { - case (key, value) ⇒ - val mergedValue = that.get(key).map(v ⇒ valueMerge(value, v)).getOrElse(value) - merged += key → mergedValue - } - - that.foreach { - case kv @ (key, _) if !map.contains(key) ⇒ merged += kv - case other ⇒ // ignore, already included. 
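[Behavior sketch for `LazyActorRef` above: messages sent before `point(...)` are buffered and flushed in order; later ones are forwarded directly. Actor and system names are invented:

    import akka.actor.{Actor, ActorSystem, Props}
    import kamon.util.LazyActorRef

    object LazyActorRefSketch extends App {
      val system = ActorSystem("lazy-ref-demo")
      val lazyRef = new LazyActorRef

      lazyRef.tell("buffered-1") // no target yet: accumulated
      lazyRef.tell("buffered-2")

      val printer = system.actorOf(Props(new Actor {
        def receive = { case message => println(message) }
      }))

      lazyRef.point(printer) // flushes both buffered messages, in order
      lazyRef.tell("direct") // delivered immediately from now on
    }
]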
- } - - merged.result() - } - } - -} diff --git a/kamon-core/src/main/scala/kamon/util/NeedToScale.scala b/kamon-core/src/main/scala/kamon/util/NeedToScale.scala deleted file mode 100644 index 1397050f..00000000 --- a/kamon-core/src/main/scala/kamon/util/NeedToScale.scala +++ /dev/null @@ -1,37 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -import com.typesafe.config.Config -import kamon.metric.instrument.{Memory, Time} -import kamon.util.ConfigTools._ - -object NeedToScale { - val TimeUnits = "time-units" - val MemoryUnits = "memory-units" - - def unapply(config: Config): Option[(Option[Time], Option[Memory])] = { - val scaleTimeTo: Option[Time] = - if (config.hasPath(TimeUnits)) Some(config.time(TimeUnits)) else None - - val scaleMemoryTo: Option[Memory] = - if (config.hasPath(MemoryUnits)) Some(config.memory(MemoryUnits)) else None - if (scaleTimeTo.isDefined || scaleMemoryTo.isDefined) Some(scaleTimeTo → scaleMemoryTo) - else None - } -} - diff --git a/kamon-core/src/main/scala/kamon/util/PaddedAtomicLong.scala b/kamon-core/src/main/scala/kamon/util/PaddedAtomicLong.scala deleted file mode 100644 index 9019eb4c..00000000 --- a/kamon-core/src/main/scala/kamon/util/PaddedAtomicLong.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License.
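[Usage sketch for `MapMerge` above: values under colliding keys are combined with the merge function, disjoint keys pass through untouched:

    import kamon.util.MapMerge.Syntax

    object MapMergeSketch extends App {
      val left = Map("requests" -> 10L, "errors" -> 1L)
      val right = Map("requests" -> 5L, "timeouts" -> 2L)

      println(left.merge(right, _ + _))
      // Map(requests -> 15, errors -> 1, timeouts -> 2)
    }
]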
- * ========================================================================================= - */ - -package kamon.util - -import java.util.concurrent.atomic.AtomicLong - -class PaddedAtomicLong(value: Long = 0) extends AtomicLong(value) { - @volatile var p1, p2, p3, p4, p5, p6 = 7L - - protected def sumPaddingToPreventOptimisation() = p1 + p2 + p3 + p4 + p5 + p6 -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/util/PathFilter.scala b/kamon-core/src/main/scala/kamon/util/PathFilter.scala deleted file mode 100644 index 50e14ace..00000000 --- a/kamon-core/src/main/scala/kamon/util/PathFilter.scala +++ /dev/null @@ -1,5 +0,0 @@ -package kamon.util - -trait PathFilter { - def accept(path: String): Boolean -} diff --git a/kamon-core/src/main/scala/kamon/util/RegexPathFilter.scala b/kamon-core/src/main/scala/kamon/util/RegexPathFilter.scala deleted file mode 100644 index 848fca87..00000000 --- a/kamon-core/src/main/scala/kamon/util/RegexPathFilter.scala +++ /dev/null @@ -1,27 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -case class RegexPathFilter(path: String) extends PathFilter { - private val pathRegex = path.r - override def accept(path: String): Boolean = { - path match { - case pathRegex(_*) ⇒ true - case _ ⇒ false - } - } -} diff --git a/kamon-core/src/main/scala/kamon/util/SameThreadExecutionContext.scala b/kamon-core/src/main/scala/kamon/util/SameThreadExecutionContext.scala deleted file mode 100644 index 5fb0d066..00000000 --- a/kamon-core/src/main/scala/kamon/util/SameThreadExecutionContext.scala +++ /dev/null @@ -1,30 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -import kamon.util.logger.LazyLogger -import scala.concurrent.ExecutionContext - -/** - * For small code blocks that don't need to be run on a separate thread. 
- */ -object SameThreadExecutionContext extends ExecutionContext { - val logger = LazyLogger("SameThreadExecutionContext") - - override def execute(runnable: Runnable): Unit = runnable.run - override def reportFailure(t: Throwable): Unit = logger.error(t.getMessage, t) -} diff --git a/kamon-core/src/main/scala/kamon/util/Sequencer.scala b/kamon-core/src/main/scala/kamon/util/Sequencer.scala deleted file mode 100644 index f368e54f..00000000 --- a/kamon-core/src/main/scala/kamon/util/Sequencer.scala +++ /dev/null @@ -1,40 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -/** - * This class implements an extremely efficient, thread-safe way to generate an - * incrementing sequence of Longs with a simple Long overflow protection. - */ -class Sequencer { - private val CloseToOverflow = Long.MaxValue - 1000000000 - private val sequenceNumber = new PaddedAtomicLong(1L) - - def next(): Long = { - val current = sequenceNumber.getAndIncrement - - // reset the sequence when the value gets close to overflow - if (current > CloseToOverflow) { - sequenceNumber.set(current - CloseToOverflow) // we need to maintain the order - } - current - } -} - -object Sequencer { - def apply(): Sequencer = new Sequencer() -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/util/Timestamp.scala b/kamon-core/src/main/scala/kamon/util/Timestamp.scala deleted file mode 100644 index 002dc470..00000000 --- a/kamon-core/src/main/scala/kamon/util/Timestamp.scala +++ /dev/null @@ -1,107 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util - -/** - * Epoch time stamp.
- */ -case class Timestamp(seconds: Long) extends AnyVal { - def <(that: Timestamp): Boolean = this.seconds < that.seconds - def >(that: Timestamp): Boolean = this.seconds > that.seconds - def ==(that: Timestamp): Boolean = this.seconds == that.seconds - def >=(that: Timestamp): Boolean = this.seconds >= that.seconds - def <=(that: Timestamp): Boolean = this.seconds <= that.seconds - - override def toString: String = String.valueOf(seconds) + ".seconds" -} - -object Timestamp { - def now: Timestamp = new Timestamp(System.currentTimeMillis() / 1000) - def earlier(l: Timestamp, r: Timestamp): Timestamp = if (l <= r) l else r - def later(l: Timestamp, r: Timestamp): Timestamp = if (l >= r) l else r -} - -/** - * Epoch time stamp in milliseconds. - */ -case class MilliTimestamp(millis: Long) extends AnyVal { - override def toString: String = String.valueOf(millis) + ".millis" - - def toTimestamp: Timestamp = new Timestamp(millis / 1000) - def toRelativeNanoTimestamp: RelativeNanoTimestamp = { - val diff = (System.currentTimeMillis() - millis) * 1000000 - new RelativeNanoTimestamp(System.nanoTime() - diff) - } -} - -object MilliTimestamp { - def now: MilliTimestamp = new MilliTimestamp(System.currentTimeMillis()) -} - -/** - * Epoch time stamp in nanoseconds. - * - * NOTE: This doesn't have any better precision than MilliTimestamp, it is just a convenient way to get an epoch - * timestamp in nanoseconds. - */ -case class NanoTimestamp(nanos: Long) extends AnyVal { - def -(that: NanoTimestamp) = new NanoTimestamp(nanos - that.nanos) - def +(that: NanoTimestamp) = new NanoTimestamp(nanos + that.nanos) - override def toString: String = String.valueOf(nanos) + ".nanos" -} - -object NanoTimestamp { - def now: NanoTimestamp = new NanoTimestamp(System.currentTimeMillis() * 1000000) -} - -/** - * Number of nanoseconds since an arbitrary origin timestamp provided by the JVM via System.nanoTime(). - */ -case class RelativeNanoTimestamp(nanos: Long) extends AnyVal { - def -(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos - that.nanos) - def +(that: RelativeNanoTimestamp) = new RelativeNanoTimestamp(nanos + that.nanos) - override def toString: String = String.valueOf(nanos) + ".nanos" - - def toMilliTimestamp: MilliTimestamp = - new MilliTimestamp(System.currentTimeMillis - ((System.nanoTime - nanos) / 1000000)) -} - -object RelativeNanoTimestamp { - val zero = new RelativeNanoTimestamp(0L) - - def now: RelativeNanoTimestamp = new RelativeNanoTimestamp(System.nanoTime()) - def relativeTo(milliTimestamp: MilliTimestamp): RelativeNanoTimestamp = - new RelativeNanoTimestamp(now.nanos - (MilliTimestamp.now.millis - milliTimestamp.millis) * 1000000) -} - -/** - * Number of nanoseconds that passed between two points in time.
- */ -case class NanoInterval(nanos: Long) extends AnyVal { - def <(that: NanoInterval): Boolean = this.nanos < that.nanos - def >(that: NanoInterval): Boolean = this.nanos > that.nanos - def ==(that: NanoInterval): Boolean = this.nanos == that.nanos - def >=(that: NanoInterval): Boolean = this.nanos >= that.nanos - def <=(that: NanoInterval): Boolean = this.nanos <= that.nanos - - override def toString: String = String.valueOf(nanos) + ".nanos" -} - -object NanoInterval { - def default: NanoInterval = new NanoInterval(0L) - def since(relative: RelativeNanoTimestamp): NanoInterval = new NanoInterval(System.nanoTime() - relative.nanos) -} diff --git a/kamon-core/src/main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala b/kamon-core/src/main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala deleted file mode 100644 index d1197a5a..00000000 --- a/kamon-core/src/main/scala/kamon/util/TriemapAtomicGetOrElseUpdate.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ -package kamon.util - -import scala.collection.concurrent.TrieMap - -object TriemapAtomicGetOrElseUpdate { - - /** - * Workaround for the non-thread-safe [[scala.collection.concurrent.TrieMap#getOrElseUpdate]] method. More details on - * why this is necessary can be found at [[https://issues.scala-lang.org/browse/SI-7943]]. - */ - implicit class Syntax[K, V](val trieMap: TrieMap[K, V]) extends AnyVal { - - def atomicGetOrElseUpdate(key: K, op: ⇒ V): V = - atomicGetOrElseUpdate(key, op, { _: V ⇒ () }) - - def atomicGetOrElseUpdate(key: K, op: ⇒ V, cleanup: V ⇒ Unit): V = - trieMap.get(key) match { - case Some(v) ⇒ v - case None ⇒ - val d = op - trieMap.putIfAbsent(key, d).map { oldValue ⇒ - // If there was an old value then `d` was never added - // and thus needs to be cleaned up. - cleanup(d) - oldValue - - } getOrElse (d) - } - } -} diff --git a/kamon-core/src/main/scala/kamon/util/executors/ExecutorMetricRecorder.scala b/kamon-core/src/main/scala/kamon/util/executors/ExecutorMetricRecorder.scala deleted file mode 100644 index a4bca570..00000000 --- a/kamon-core/src/main/scala/kamon/util/executors/ExecutorMetricRecorder.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied.
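[Usage sketch for `atomicGetOrElseUpdate` above: under a race only one value survives in the map, and a losing instance is handed to the optional cleanup function instead:

    import java.util.concurrent.atomic.AtomicLong
    import scala.collection.concurrent.TrieMap
    import kamon.util.TriemapAtomicGetOrElseUpdate.Syntax

    object AtomicGetOrElseUpdateSketch extends App {
      val recorders = TrieMap.empty[String, AtomicLong]

      val counter = recorders.atomicGetOrElseUpdate("hits", new AtomicLong())
      counter.incrementAndGet()
      println(recorders("hits").get()) // 1
    }
]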
See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util.executors - -import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder} -import kamon.metric.instrument.{Gauge, MinMaxCounter, DifferentialValueCollector, InstrumentFactory} -import java.util.concurrent.{ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor} -import kamon.util.executors.ForkJoinPools.ForkJoinMetrics - -import scala.concurrent.forkjoin.ForkJoinPool - -object ForkJoinPools extends ForkJoinLowPriority { - trait ForkJoinMetrics[T] { - def getParallelism(fjp: T): Long - def getPoolSize(fjp: T): Long - def getActiveThreadCount(fjp: T): Long - def getRunningThreadCount(fjp: T): Long - def getQueuedTaskCount(fjp: T): Long - def getQueuedSubmissionCount(fjp: T): Long - } - - implicit object JavaForkJoin extends ForkJoinMetrics[JavaForkJoinPool] { - def getParallelism(fjp: JavaForkJoinPool) = fjp.getParallelism - def getPoolSize(fjp: JavaForkJoinPool) = fjp.getPoolSize.toLong - def getRunningThreadCount(fjp: JavaForkJoinPool) = fjp.getRunningThreadCount.toLong - def getActiveThreadCount(fjp: JavaForkJoinPool) = fjp.getActiveThreadCount.toLong - def getQueuedTaskCount(fjp: JavaForkJoinPool) = fjp.getQueuedTaskCount - def getQueuedSubmissionCount(fjp: JavaForkJoinPool) = fjp.getQueuedSubmissionCount - } -} - -trait ForkJoinLowPriority { - implicit object ScalaForkJoin extends ForkJoinMetrics[ForkJoinPool] { - def getParallelism(fjp: ForkJoinPool) = fjp.getParallelism - def getPoolSize(fjp: ForkJoinPool) = fjp.getPoolSize.toLong - def getRunningThreadCount(fjp: ForkJoinPool) = fjp.getRunningThreadCount.toLong - def getActiveThreadCount(fjp: ForkJoinPool) = fjp.getActiveThreadCount.toLong - def getQueuedTaskCount(fjp: ForkJoinPool) = fjp.getQueuedTaskCount - def getQueuedSubmissionCount(fjp: ForkJoinPool) = fjp.getQueuedSubmissionCount - } -} - -abstract class ForkJoinPoolMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) { - def parallelism: MinMaxCounter - def poolSize: Gauge - def activeThreads: Gauge - def runningThreads: Gauge - def queuedTaskCount: Gauge - def queuedSubmissionCount: Gauge -} - -object ForkJoinPoolMetrics { - def factory[T: ForkJoinMetrics](fjp: T, categoryName: String) = new EntityRecorderFactory[ForkJoinPoolMetrics] { - val forkJoinMetrics = implicitly[ForkJoinMetrics[T]] - - def category: String = categoryName - def createRecorder(instrumentFactory: InstrumentFactory) = new ForkJoinPoolMetrics(instrumentFactory) { - val parallelism = minMaxCounter("parallelism") - parallelism.increment(forkJoinMetrics.getParallelism(fjp)) // Steady value.
- - val poolSize = gauge("pool-size", forkJoinMetrics.getPoolSize(fjp)) - val activeThreads = gauge("active-threads", forkJoinMetrics.getActiveThreadCount(fjp)) - val runningThreads = gauge("running-threads", forkJoinMetrics.getRunningThreadCount(fjp)) - val queuedTaskCount = gauge("queued-task-count", forkJoinMetrics.getQueuedTaskCount(fjp)) - val queuedSubmissionCount = gauge("queued-submission-count", forkJoinMetrics.getQueuedSubmissionCount(fjp)) - } - } -} - -class ThreadPoolExecutorMetrics(tpe: ThreadPoolExecutor, instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) { - val corePoolSize = gauge("core-pool-size", tpe.getCorePoolSize.toLong) - val maxPoolSize = gauge("max-pool-size", tpe.getMaximumPoolSize.toLong) - val poolSize = gauge("pool-size", tpe.getPoolSize.toLong) - val activeThreads = gauge("active-threads", tpe.getActiveCount.toLong) - val processedTasks = gauge("processed-tasks", DifferentialValueCollector(() ⇒ { - tpe.getTaskCount - })) -} - -object ThreadPoolExecutorMetrics { - def factory(tpe: ThreadPoolExecutor, cat: String) = new EntityRecorderFactory[ThreadPoolExecutorMetrics] { - def category: String = cat - def createRecorder(instrumentFactory: InstrumentFactory) = new ThreadPoolExecutorMetrics(tpe, instrumentFactory) - } -} diff --git a/kamon-core/src/main/scala/kamon/util/executors/ExecutorServiceMetrics.scala b/kamon-core/src/main/scala/kamon/util/executors/ExecutorServiceMetrics.scala deleted file mode 100644 index 60612beb..00000000 --- a/kamon-core/src/main/scala/kamon/util/executors/ExecutorServiceMetrics.scala +++ /dev/null @@ -1,134 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util.executors - -import java.util.concurrent.{ExecutorService, ForkJoinPool ⇒ JavaForkJoinPool, ThreadPoolExecutor} - -import kamon.Kamon -import kamon.metric.Entity - -import scala.concurrent.forkjoin.ForkJoinPool -import scala.util.control.NoStackTrace - -object ExecutorServiceMetrics { - val Category = "executor-service" - - private val DelegatedExecutor = Class.forName("java.util.concurrent.Executors$DelegatedExecutorService") - private val FinalizableDelegated = Class.forName("java.util.concurrent.Executors$FinalizableDelegatedExecutorService") - private val DelegateScheduled = Class.forName("java.util.concurrent.Executors$DelegatedScheduledExecutorService") - private val JavaForkJoinPool = classOf[JavaForkJoinPool] - private val ScalaForkJoinPool = classOf[ForkJoinPool] - - private val executorField = { - // executorService is private :( - val field = DelegatedExecutor.getDeclaredField("e") - field.setAccessible(true) - field - } - - /** - * - * Register the [[http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html ThreadPoolExecutor]] to Monitor. 
- * - * @param name The name of the [[ThreadPoolExecutor]] - * @param threadPool The instance of the [[ThreadPoolExecutor]] - * @param tags The tags associated to the [[ThreadPoolExecutor]] - */ - @inline private def registerThreadPool(name: String, threadPool: ThreadPoolExecutor, tags: Map[String, String]): Entity = { - val threadPoolEntity = Entity(name, Category, tags + ("executor-type" → "thread-pool-executor")) - Kamon.metrics.entity(ThreadPoolExecutorMetrics.factory(threadPool, Category), threadPoolEntity) - threadPoolEntity - } - - /** - * - * Register the [[http://www.scala-lang.org/api/current/index.html#scala.collection.parallel.TaskSupport ForkJoinPool]] to Monitor. - * - * @param name The name of the [[ForkJoinPool]] - * @param forkJoinPool The instance of the [[ForkJoinPool]] - * @param tags The tags associated to the [[ForkJoinPool]] - */ - @inline private def registerScalaForkJoin(name: String, forkJoinPool: ForkJoinPool, tags: Map[String, String]): Entity = { - val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool")) - Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity) - forkJoinEntity - } - - /** - * - * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ForkJoinPool.html JavaForkJoinPool]] to Monitor. - * - * @param name The name of the [[JavaForkJoinPool]] - * @param forkJoinPool The instance of the [[JavaForkJoinPool]] - * @param tags The tags associated to the [[JavaForkJoinPool]] - */ - @inline private def registerJavaForkJoin(name: String, forkJoinPool: JavaForkJoinPool, tags: Map[String, String]): Entity = { - val forkJoinEntity = Entity(name, Category, tags + ("executor-type" → "fork-join-pool")) - Kamon.metrics.entity(ForkJoinPoolMetrics.factory(forkJoinPool, Category), forkJoinEntity) - forkJoinEntity - } - - /** - * - * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] to Monitor.
- * - * @param name The name of the [[ExecutorService]] - * @param executorService The instance of the [[ExecutorService]] - * @param tags The tags associated to the [[ExecutorService]] - */ - def register(name: String, executorService: ExecutorService, tags: Map[String, String]): Entity = executorService match { - case threadPoolExecutor: ThreadPoolExecutor ⇒ registerThreadPool(name, threadPoolExecutor, tags) - case scalaForkJoinPool: ForkJoinPool if scalaForkJoinPool.getClass.isAssignableFrom(ScalaForkJoinPool) ⇒ registerScalaForkJoin(name, scalaForkJoinPool, tags) - case javaForkJoinPool: JavaForkJoinPool if javaForkJoinPool.getClass.isAssignableFrom(JavaForkJoinPool) ⇒ registerJavaForkJoin(name, javaForkJoinPool, tags) - case delegatedExecutor: ExecutorService if delegatedExecutor.getClass.isAssignableFrom(DelegatedExecutor) ⇒ registerDelegatedExecutor(name, delegatedExecutor, tags) - case delegatedScheduledExecutor: ExecutorService if delegatedScheduledExecutor.getClass.isAssignableFrom(DelegateScheduled) ⇒ registerDelegatedExecutor(name, delegatedScheduledExecutor, tags) - case finalizableDelegatedExecutor: ExecutorService if finalizableDelegatedExecutor.getClass.isAssignableFrom(FinalizableDelegated) ⇒ registerDelegatedExecutor(name, finalizableDelegatedExecutor, tags) - case other ⇒ throw NotSupportedException(s"The ExecutorService $name is not supported.") - } - - //Java variant - def register(name: String, executorService: ExecutorService, tags: java.util.Map[String, String]): Entity = { - import scala.collection.JavaConverters._ - register(name, executorService, tags.asScala.toMap) - } - - /** - * - * Register the [[https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html ExecutorService]] to Monitor. - * - * @param name The name of the [[ExecutorService]] - * @param executorService The instance of the [[ExecutorService]] - */ - def register(name: String, executorService: ExecutorService): Entity = { - register(name, executorService, Map.empty[String, String]) - } - - def remove(entity: Entity): Unit = Kamon.metrics.removeEntity(entity) - - /** - * INTERNAL USAGE ONLY - */ - private def registerDelegatedExecutor(name: String, executor: ExecutorService, tags: Map[String, String]) = { - val underlyingExecutor = executorField.get(executor) match { - case executorService: ExecutorService ⇒ executorService - case other ⇒ other - } - register(name, underlyingExecutor.asInstanceOf[ExecutorService], tags) - } - - case class NotSupportedException(message: String) extends RuntimeException with NoStackTrace -} \ No newline at end of file diff --git a/kamon-core/src/main/scala/kamon/util/http/HttpServerMetrics.scala b/kamon-core/src/main/scala/kamon/util/http/HttpServerMetrics.scala deleted file mode 100644 index 929fef4f..00000000 --- a/kamon-core/src/main/scala/kamon/util/http/HttpServerMetrics.scala +++ /dev/null @@ -1,41 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. 
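[Usage sketch for the `register`/`remove` pair above. The pool and entity names are invented; `Executors.newFixedThreadPool` returns a plain `ThreadPoolExecutor` at runtime, so the first match case applies:

    import java.util.concurrent.Executors
    import kamon.Kamon
    import kamon.util.executors.ExecutorServiceMetrics

    object ExecutorRegistrationSketch extends App {
      Kamon.start()
      val pool = Executors.newFixedThreadPool(4)

      val entity = ExecutorServiceMetrics.register("worker-pool", pool)
      // pool-size, active-threads and task gauges are collected from here on

      ExecutorServiceMetrics.remove(entity)
      Kamon.shutdown()
    }
]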
See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.util.http - -import kamon.metric.{EntityRecorderFactory, GenericEntityRecorder} -import kamon.metric.instrument.InstrumentFactory - -/** - * Counts HTTP response status codes into per status code and per trace name + status counters. If recording a HTTP - * response with status 500 for the trace "GetUser", the counter with name "500" as well as the counter with name - * "GetUser_500" will be incremented. - */ -class HttpServerMetrics(instrumentFactory: InstrumentFactory) extends GenericEntityRecorder(instrumentFactory) { - - def recordResponse(statusCode: String): Unit = - counter(statusCode).increment() - - def recordResponse(traceName: String, statusCode: String): Unit = { - recordResponse(statusCode) - counter(traceName + "_" + statusCode).increment() - } -} - -object HttpServerMetrics extends EntityRecorderFactory[HttpServerMetrics] { - def category: String = "http-server" - def createRecorder(instrumentFactory: InstrumentFactory): HttpServerMetrics = new HttpServerMetrics(instrumentFactory) -} diff --git a/kamon-core/src/main/scala/kamon/util/logger/LazyLogger.scala b/kamon-core/src/main/scala/kamon/util/logger/LazyLogger.scala deleted file mode 100644 index 11be7bbe..00000000 --- a/kamon-core/src/main/scala/kamon/util/logger/LazyLogger.scala +++ /dev/null @@ -1,49 +0,0 @@ -/* ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
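[Usage sketch for `HttpServerMetrics` above, obtaining a recorder through Kamon's entity registry; the entity name is invented:

    import kamon.Kamon
    import kamon.util.http.HttpServerMetrics

    object HttpServerMetricsSketch extends App {
      Kamon.start()
      val recorder = Kamon.metrics.entity(HttpServerMetrics, "sample-server")

      // increments both the "500" counter and the "GetUser_500" counter
      recorder.recordResponse("GetUser", "500")
      Kamon.shutdown()
    }
]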
- * ========================================================================================= - */ - -package kamon.util.logger - -import org.slf4j.{Logger ⇒ SLF4JLogger} - -class LazyLogger(val logger: SLF4JLogger) { - - @inline final def isTraceEnabled = logger.isTraceEnabled - @inline final def trace(msg: ⇒ String): Unit = if (isTraceEnabled) logger.trace(msg.toString) - @inline final def trace(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isTraceEnabled) logger.trace(msg, t) - - @inline final def isDebugEnabled = logger.isDebugEnabled - @inline final def debug(msg: ⇒ String): Unit = if (isDebugEnabled) logger.debug(msg.toString) - @inline final def debug(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isDebugEnabled) logger.debug(msg, t) - - @inline final def isErrorEnabled = logger.isErrorEnabled - @inline final def error(msg: ⇒ String): Unit = if (isErrorEnabled) logger.error(msg.toString) - @inline final def error(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isErrorEnabled) logger.error(msg, t) - - @inline final def isInfoEnabled = logger.isInfoEnabled - @inline final def info(msg: ⇒ String): Unit = if (isInfoEnabled) logger.info(msg.toString) - @inline final def info(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isInfoEnabled) logger.info(msg, t) - - @inline final def isWarnEnabled = logger.isWarnEnabled - @inline final def warn(msg: ⇒ String): Unit = if (isWarnEnabled) logger.warn(msg.toString) - @inline final def warn(msg: ⇒ String, t: ⇒ Throwable): Unit = if (isWarnEnabled) logger.warn(msg, t) -} - -object LazyLogger { - import scala.reflect.{classTag, ClassTag} - - def apply(name: String): LazyLogger = new LazyLogger(org.slf4j.LoggerFactory.getLogger(name)) - def apply(cls: Class[_]): LazyLogger = apply(cls.getName) - def apply[C: ClassTag](): LazyLogger = apply(classTag[C].runtimeClass.getName) -} \ No newline at end of file diff --git a/kamon-core/src/test/resources/application.conf b/kamon-core/src/test/resources/application.conf deleted file mode 100644 index bf6123d9..00000000 --- a/kamon-core/src/test/resources/application.conf +++ /dev/null @@ -1,26 +0,0 @@ -akka { - loggers = ["akka.event.slf4j.Slf4jLogger"] -} - -kamon { - metric { - tick-interval = 1 hour - default-collection-context-buffer-size = 100 - } - - trace { - level-of-detail = simple-trace - sampling = all - } - - default-tags = { - name = "jason" - number = 42 - username = ${USER} - list = [1, 2, 3] // lists do not make sense for a tag - object = { - nested-bool = true - nested-string = "a string" - } - } -} \ No newline at end of file diff --git a/kamon-core/src/test/resources/logback.xml b/kamon-core/src/test/resources/logback.xml deleted file mode 100644 index b467456f..00000000 --- a/kamon-core/src/test/resources/logback.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n - - - - - - - \ No newline at end of file diff --git a/kamon-core/src/test/scala/kamon/KamonLifecycleSpec.scala b/kamon-core/src/test/scala/kamon/KamonLifecycleSpec.scala deleted file mode 100644 index 2fbd1b71..00000000 --- a/kamon-core/src/test/scala/kamon/KamonLifecycleSpec.scala +++ /dev/null @@ -1,93 +0,0 @@ -package kamon - -import akka.actor.ActorSystem -import akka.testkit.{TestKitBase, TestProbe} -import com.typesafe.config.ConfigFactory -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot -import kamon.metric.{EntitySnapshot, SubscriptionsDispatcher} -import kamon.util.LazyActorRef -import org.scalatest.{Matchers, WordSpecLike} -import org.scalactic.TimesOnInt._ - -import 
scala.concurrent.duration._ - -class KamonLifecycleSpec extends TestKitBase with WordSpecLike with Matchers { - override implicit lazy val system: ActorSystem = ActorSystem("kamon-lifecycle-spec") - - "The Kamon lifecycle" should { - "allow Kamon to be used before it gets started" in { - val someMetric = Kamon.metrics.histogram("allow-me-before-start") - } - - "allow Kamon to be started/shutdown several times" in { - 10 times { - Kamon.shutdown() - Kamon.start() - Kamon.start() - Kamon.shutdown() - Kamon.shutdown() - } - } - - "not dispatch subscriptions before Kamon startup" in { - val subscriber = TestProbe() - Kamon.metrics.histogram("only-after-startup").record(100) - Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) - - flushSubscriptions() - subscriber.expectNoMsg(300 millis) - - Kamon.metrics.histogram("only-after-startup").record(100) - Kamon.start() - flushSubscriptions() - subscriber.expectMsgType[TickMetricSnapshot] - Kamon.shutdown() - } - - "not dispatch subscriptions after Kamon shutdown" in { - val subscriber = TestProbe() - Kamon.start() - Kamon.metrics.histogram("only-before-shutdown").record(100) - Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) - - flushSubscriptions() - subscriber.expectMsgType[TickMetricSnapshot] - - Kamon.metrics.histogram("only-before-shutdown").record(100) - Kamon.shutdown() - Thread.sleep(500) - flushSubscriptions() - subscriber.expectNoMsg(300 millis) - } - - "reconfigure filters after being started" in { - val customConfig = ConfigFactory.parseString( - """ - |kamon.metric.filters.histogram { - | includes = [ "**" ] - | excludes = ["untracked-histogram"] - |} - """.stripMargin - ) - - Kamon.metrics.shouldTrack("untracked-histogram", "histogram") shouldBe true - Kamon.start(customConfig.withFallback(ConfigFactory.load())) - Kamon.metrics.shouldTrack("untracked-histogram", "histogram") shouldBe false - - } - } - - def takeSnapshotOf(name: String, category: String): EntitySnapshot = { - val collectionContext = Kamon.metrics.buildDefaultCollectionContext - val recorder = Kamon.metrics.find(name, category).get - recorder.collect(collectionContext) - } - - def flushSubscriptions(): Unit = { - val subscriptionsField = Kamon.metrics.getClass.getDeclaredField("_subscriptions") - subscriptionsField.setAccessible(true) - val subscriptions = subscriptionsField.get(Kamon.metrics).asInstanceOf[LazyActorRef] - - subscriptions.tell(SubscriptionsDispatcher.Tick) - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/MetricScaleDecoratorSpec.scala b/kamon-core/src/test/scala/kamon/metric/MetricScaleDecoratorSpec.scala deleted file mode 100644 index d3d0f4fb..00000000 --- a/kamon-core/src/test/scala/kamon/metric/MetricScaleDecoratorSpec.scala +++ /dev/null @@ -1,102 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric - -import kamon.Kamon -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot -import kamon.metric.instrument.{InstrumentFactory, Memory, Time, UnitOfMeasurement} -import kamon.testkit.BaseKamonSpec -import kamon.util.MilliTimestamp -import org.scalatest.OptionValues._ - -class MetricScaleDecoratorSpec extends BaseKamonSpec("metrics-scale-decorator-spec") with SnapshotFixtures { - "the MetricScaleDecorator" when { - "receives a snapshot" which { - - val scaleDecorator = system.actorOf(MetricScaleDecorator.props( - Some(Time.Milliseconds), Some(Memory.KiloBytes), testActor - )) - "is empty" should { - "do nothing for empty snapshots" in { - scaleDecorator ! emptySnapshot - expectMsg(emptySnapshot) - } - } - "is non empty" should { - scaleDecorator ! nonEmptySnapshot - val scaled = expectMsgType[TickMetricSnapshot] - val snapshot = scaled.metrics(testEntity) - - "scale time metrics" in { - snapshot.histogram("nano-time").value.max should be(10L +- 1L) - snapshot.counter("micro-time").value.count should be(1000L) - } - "scale memory metrics" in { - snapshot.histogram("byte-memory").value.max should be(1) - snapshot.counter("kbyte-memory").value.count should be(100L) - } - "do nothing with unknown metrics" in { - snapshot.histogram("unknown-histogram").value.max should be(1000L) - snapshot.counter("unknown-counter").value.count should be(10L) - } - "not change from and to" in { - scaled.from.millis should be(1000) - scaled.to.millis should be(2000) - } - } - } - } -} - -trait SnapshotFixtures { - self: BaseKamonSpec ⇒ - - class ScaleDecoratorTestMetrics(instrumentFactory: InstrumentFactory) - extends GenericEntityRecorder(instrumentFactory) { - val nanoTime = histogram("nano-time", Time.Nanoseconds) - val microTime = counter("micro-time", Time.Microseconds) - val byteMemory = histogram("byte-memory", Memory.Bytes) - val kbyteMemory = counter("kbyte-memory", Memory.KiloBytes) - val unknownHistogram = histogram("unknown-histogram", UnitOfMeasurement.Unknown) - val unknownCounter = counter("unknown-counter", UnitOfMeasurement.Unknown) - } - - object ScaleDecoratorTestMetrics extends EntityRecorderFactory[ScaleDecoratorTestMetrics] { - override def category: String = "decorator-spec" - - override def createRecorder(instrumentFactory: InstrumentFactory): ScaleDecoratorTestMetrics = - new ScaleDecoratorTestMetrics(instrumentFactory) - } - - val testEntity = Entity("metrics-scale-decorator-spec", "decorator-spec") - val recorder = Kamon.metrics.entity(ScaleDecoratorTestMetrics, "metrics-scale-decorator-spec") - - val emptySnapshot = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map.empty) - - recorder.unknownCounter.increment(10) - recorder.unknownHistogram.record(1000L) - recorder.nanoTime.record(10000000L) - recorder.microTime.increment(1000000L) - recorder.byteMemory.record(1024L) - recorder.kbyteMemory.increment(100L) - - val nonEmptySnapshot = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( - (testEntity → recorder.collect(collectionContext)) - )) - -} - diff --git a/kamon-core/src/test/scala/kamon/metric/SimpleMetricsSpec.scala b/kamon-core/src/test/scala/kamon/metric/SimpleMetricsSpec.scala deleted file mode 100644 index 6d753888..00000000 --- a/kamon-core/src/test/scala/kamon/metric/SimpleMetricsSpec.scala +++ /dev/null @@ -1,106 +0,0 @@ -/* - * 
========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import kamon.Kamon -import kamon.metric.instrument.Histogram.DynamicRange -import kamon.testkit.BaseKamonSpec -import scala.concurrent.duration._ - -class SimpleMetricsSpec extends BaseKamonSpec("simple-metrics-spec") { - - "the SimpleMetrics extension" should { - - "allow registering a fully configured Histogram and get the same Histogram if registering again" in { - val histogramA = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2)) - val histogramB = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2)) - - histogramA shouldBe theSameInstanceAs(histogramB) - } - - "return the original Histogram when registering a fully configured Histogram for the second time but with different settings" in { - val histogramA = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 10000, 2)) - val histogramB = Kamon.metrics.histogram("histogram-with-settings", DynamicRange(1, 50000, 2)) - - histogramA shouldBe theSameInstanceAs(histogramB) - } - - "allow registering a Histogram that takes the default configuration from the kamon.metrics.precision settings" in { - Kamon.metrics.histogram("histogram-with-default-configuration") - } - - "allow registering a Counter and get the same Counter if registering again" in { - val counterA = Kamon.metrics.counter("counter") - val counterB = Kamon.metrics.counter("counter") - - counterA shouldBe theSameInstanceAs(counterB) - } - - "allow registering a fully configured MinMaxCounter and get the same MinMaxCounter if registering again" in { - val minMaxCounterA = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second) - val minMaxCounterB = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second) - - minMaxCounterA shouldBe theSameInstanceAs(minMaxCounterB) - } - - "return the original MinMaxCounter when registering a fully configured MinMaxCounter for the second time but with different settings" in { - val minMaxCounterA = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 10000, 2), 1 second) - val minMaxCounterB = Kamon.metrics.minMaxCounter("min-max-counter-with-settings", DynamicRange(1, 50000, 2), 1 second) - - minMaxCounterA shouldBe theSameInstanceAs(minMaxCounterB) - } - - "allow registering a MinMaxCounter that takes the default configuration from the kamon.metrics.precision settings" in { - Kamon.metrics.minMaxCounter("min-max-counter-with-default-configuration") - } - - "allow registering a fully configured Gauge and get the same Gauge if registering again" in { - val gaugeA = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L) - val gaugeB = 
Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L) - - gaugeA shouldBe theSameInstanceAs(gaugeB) - } - - "return the original Gauge when registering a fully configured Gauge for the second time but with different settings" in { - val gaugeA = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 10000, 2), 1 second)(1L) - val gaugeB = Kamon.metrics.gauge("gauge-with-settings", DynamicRange(1, 50000, 2), 1 second)(1L) - - gaugeA shouldBe theSameInstanceAs(gaugeB) - } - - "allow registering a Gauge that takes the default configuration from the kamon.metrics.precision settings" in { - Kamon.metrics.gauge("gauge-with-default-configuration")(2L) - } - - "allow un-registering user metrics" in { - val counter = Kamon.metrics.counter("counter-for-remove") - val histogram = Kamon.metrics.histogram("histogram-for-remove") - val minMaxCounter = Kamon.metrics.minMaxCounter("min-max-counter-for-remove") - val gauge = Kamon.metrics.gauge("gauge-for-remove")(2L) - - Kamon.metrics.removeCounter("counter-for-remove") - Kamon.metrics.removeHistogram("histogram-for-remove") - Kamon.metrics.removeMinMaxCounter("min-max-counter-for-remove") - Kamon.metrics.removeGauge("gauge-for-remove") - - counter should not be (theSameInstanceAs(Kamon.metrics.counter("counter-for-remove"))) - histogram should not be (theSameInstanceAs(Kamon.metrics.histogram("histogram-for-remove"))) - minMaxCounter should not be (theSameInstanceAs(Kamon.metrics.minMaxCounter("min-max-counter-for-remove"))) - gauge should not be (theSameInstanceAs(Kamon.metrics.gauge("gauge-for-remove")(2L))) - } - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/SubscriptionsProtocolSpec.scala b/kamon-core/src/test/scala/kamon/metric/SubscriptionsProtocolSpec.scala deleted file mode 100644 index ee34acc7..00000000 --- a/kamon-core/src/test/scala/kamon/metric/SubscriptionsProtocolSpec.scala +++ /dev/null @@ -1,150 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric - -import akka.actor._ -import akka.testkit.{TestProbe, ImplicitSender} -import com.typesafe.config.ConfigFactory -import kamon.Kamon -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot -import kamon.testkit.BaseKamonSpec -import scala.concurrent.duration._ - -class SubscriptionsProtocolSpec extends BaseKamonSpec("subscriptions-protocol-spec") with ImplicitSender { - override lazy val config = - ConfigFactory.parseString( - """ - |kamon.metric { - | tick-interval = 1 hour - |} - """.stripMargin - ) - - lazy val metricsModule = Kamon.metrics - import metricsModule.{entity, subscribe, unsubscribe} - val defaultTags: Map[String, String] = Kamon.metrics.defaultTags - - "the Subscriptions messaging protocol" should { - "allow subscribing for a single tick" in { - val subscriber = TestProbe() - entity(TraceMetrics, "one-shot") - subscribe("trace", "one-shot", subscriber.ref, permanently = false) - - flushSubscriptions() - val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] - - tickSnapshot.metrics.size should be(1) - tickSnapshot.metrics.keys should contain(Entity("one-shot", "trace", defaultTags)) - - flushSubscriptions() - subscriber.expectNoMsg(1 second) - } - - "include default tags in delivered snapshots" in { - val subscriber = TestProbe() - - Kamon.metrics.histogram("histogram-with-tags").record(1) - Kamon.metrics.subscribe("**", "**", subscriber.ref, permanently = true) - flushSubscriptions() - - val tickSubscription = subscriber.expectMsgType[TickMetricSnapshot] - tickSubscription.metrics.head._1.tags.get("name") shouldBe Some("jason") - tickSubscription.metrics.head._1.tags.get("number") shouldBe Some("42") - tickSubscription.metrics.head._1.tags.get("username").isDefined shouldBe true - tickSubscription.metrics.head._1.tags.get("object.nested-bool") shouldBe Some("true") - tickSubscription.metrics.head._1.tags.get("object.nested-string") shouldBe Some("a string") - tickSubscription.metrics.head._1.tags.get("list") shouldBe None - } - - "allow subscribing permanently to a metric" in { - val subscriber = TestProbe() - entity(TraceMetrics, "permanent") - subscribe("trace", "permanent", subscriber.ref, permanently = true) - - for (repetition ← 1 to 5) { - flushSubscriptions() - val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] - - tickSnapshot.metrics.size should be(1) - tickSnapshot.metrics.keys should contain(Entity("permanent", "trace", defaultTags)) - } - } - - "allow subscribing to metrics matching a glob pattern" in { - val subscriber = TestProbe() - entity(TraceMetrics, "include-one") - entity(TraceMetrics, "exclude-two") - entity(TraceMetrics, "include-three") - subscribe("trace", "include-*", subscriber.ref, permanently = true) - - for (repetition ← 1 to 5) { - flushSubscriptions() - val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] - - tickSnapshot.metrics.size should be(2) - tickSnapshot.metrics.keys should contain(Entity("include-one", "trace", defaultTags)) - tickSnapshot.metrics.keys should contain(Entity("include-three", "trace", defaultTags)) - } - } - - "send a single TickMetricSnapshot to each subscriber, even if subscribed multiple times" in { - val subscriber = TestProbe() - entity(TraceMetrics, "include-one") - entity(TraceMetrics, "exclude-two") - entity(TraceMetrics, "include-three") - subscribe("trace", "include-one", subscriber.ref, permanently = true) - subscribe("trace", "include-three", 
subscriber.ref, permanently = true) - - for (repetition ← 1 to 5) { - flushSubscriptions() - val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] - - tickSnapshot.metrics.size should be(2) - tickSnapshot.metrics.keys should contain(Entity("include-one", "trace", defaultTags)) - tickSnapshot.metrics.keys should contain(Entity("include-three", "trace", defaultTags)) - } - } - - "allow un-subscribing a subscriber" in { - val subscriber = TestProbe() - entity(TraceMetrics, "one-shot") - subscribe("trace", "one-shot", subscriber.ref, permanently = true) - - flushSubscriptions() - val tickSnapshot = subscriber.expectMsgType[TickMetricSnapshot] - tickSnapshot.metrics.size should be(1) - tickSnapshot.metrics.keys should contain(Entity("one-shot", "trace", defaultTags)) - - unsubscribe(subscriber.ref) - - flushSubscriptions() - subscriber.expectNoMsg(1 second) - } - } - - def subscriptionsActor: ActorRef = { - val listener = TestProbe() - system.actorSelection("/user/kamon/kamon-metrics").tell(Identify(1), listener.ref) - listener.expectMsgType[ActorIdentity].ref.get - } -} - -class ForwarderSubscriber(target: ActorRef) extends Actor { - def receive = { - case anything ⇒ target.forward(anything) - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala b/kamon-core/src/test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala deleted file mode 100644 index 6172a9e0..00000000 --- a/kamon-core/src/test/scala/kamon/metric/TickMetricSnapshotBufferSpec.scala +++ /dev/null @@ -1,97 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric - -import com.typesafe.config.ConfigFactory -import kamon.Kamon -import kamon.metric.instrument.Histogram.MutableRecord -import kamon.testkit.BaseKamonSpec -import kamon.util.MilliTimestamp -import akka.testkit.ImplicitSender -import scala.concurrent.duration._ -import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot - -class TickMetricSnapshotBufferSpec extends BaseKamonSpec("trace-metrics-spec") with ImplicitSender { - - "the TickMetricSnapshotBuffer" should { - "merge TickMetricSnapshots received until the flush timeout is reached and fix the from/to fields" in new SnapshotFixtures { - val buffer = system.actorOf(TickMetricSnapshotBuffer.props(3 seconds, testActor)) - - buffer ! firstEmpty - buffer ! secondEmpty - buffer ! thirdEmpty - - within(2 seconds)(expectNoMsg()) - val mergedSnapshot = expectMsgType[TickMetricSnapshot] - - mergedSnapshot.from.millis should equal(1000) - mergedSnapshot.to.millis should equal(4000) - mergedSnapshot.metrics should be('empty) - } - - "merge empty and non-empty snapshots" in new SnapshotFixtures { - val buffer = system.actorOf(TickMetricSnapshotBuffer.props(3 seconds, testActor)) - - buffer ! firstNonEmpty - buffer ! 
secondNonEmpty - buffer ! thirdEmpty - - within(2 seconds)(expectNoMsg()) - val mergedSnapshot = expectMsgType[TickMetricSnapshot] - - mergedSnapshot.from.millis should equal(1000) - mergedSnapshot.to.millis should equal(4000) - mergedSnapshot.metrics should not be ('empty) - - val testMetricSnapshot = mergedSnapshot.metrics(testTraceIdentity).histogram("elapsed-time").get - testMetricSnapshot.min should equal(10) - testMetricSnapshot.max should equal(300) - testMetricSnapshot.numberOfMeasurements should equal(6) - testMetricSnapshot.recordsIterator.toStream should contain allOf ( - MutableRecord(10, 3), - MutableRecord(20, 1), - MutableRecord(30, 1), - MutableRecord(300, 1) - ) - - } - } - - trait SnapshotFixtures { - val collectionContext = Kamon.metrics.buildDefaultCollectionContext - val testTraceIdentity = Entity("buffer-spec-test-trace", "trace") - val traceRecorder = Kamon.metrics.entity(TraceMetrics, "buffer-spec-test-trace") - - val firstEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map.empty) - val secondEmpty = TickMetricSnapshot(new MilliTimestamp(2000), new MilliTimestamp(3000), Map.empty) - val thirdEmpty = TickMetricSnapshot(new MilliTimestamp(3000), new MilliTimestamp(4000), Map.empty) - - traceRecorder.elapsedTime.record(10L) - traceRecorder.elapsedTime.record(20L) - traceRecorder.elapsedTime.record(30L) - val firstNonEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( - (testTraceIdentity → traceRecorder.collect(collectionContext)) - )) - - traceRecorder.elapsedTime.record(10L) - traceRecorder.elapsedTime.record(10L) - traceRecorder.elapsedTime.record(300L) - val secondNonEmpty = TickMetricSnapshot(new MilliTimestamp(1000), new MilliTimestamp(2000), Map( - (testTraceIdentity → traceRecorder.collect(collectionContext)) - )) - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/TraceMetricsSpec.scala b/kamon-core/src/test/scala/kamon/metric/TraceMetricsSpec.scala deleted file mode 100644 index 4c44dc07..00000000 --- a/kamon-core/src/test/scala/kamon/metric/TraceMetricsSpec.scala +++ /dev/null @@ -1,121 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric - -import akka.testkit.ImplicitSender -import com.typesafe.config.ConfigFactory -import kamon.testkit.BaseKamonSpec -import kamon.trace.Tracer -import kamon.metric.instrument.Histogram - -import scala.util.control.NoStackTrace - -class TraceMetricsSpec extends BaseKamonSpec("trace-metrics-spec") with ImplicitSender { - - "the TraceMetrics" should { - "record the elapsed time between a trace creation and finish" in { - for (repetitions ← 1 to 10) { - Tracer.withContext(newContext("record-elapsed-time")) { - Tracer.currentContext.finish() - } - } - - val snapshot = takeSnapshotOf("record-elapsed-time", "trace") - snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(10) - } - - "record the elapsed time for segments that occur inside a given trace" in { - Tracer.withContext(newContext("trace-with-segments")) { - val segment = Tracer.currentContext.startSegment("test-segment", "test-category", "test-library") - segment.finish() - Tracer.currentContext.finish() - } - - val snapshot = takeSnapshotOf("test-segment", "trace-segment", - tags = Map( - "trace" → "trace-with-segments", - "category" → "test-category", - "library" → "test-library" - )) - - snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) - } - - "record the elapsed time for segments that finish after their corresponding trace has finished" in { - val segment = Tracer.withContext(newContext("closing-segment-after-trace")) { - val s = Tracer.currentContext.startSegment("test-segment", "test-category", "test-library") - Tracer.currentContext.finish() - s - } - - val beforeFinishSegmentSnapshot = takeSnapshotOf("closing-segment-after-trace", "trace") - beforeFinishSegmentSnapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) - - intercept[NoSuchElementException] { - // The segment metric should not exist before it has finished. 
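- // (In BaseKamonSpec, takeSnapshotOf resolves the entity with Kamon.metrics.find(...).get, so a metric that was never registered surfaces as a NoSuchElementException.)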
- - takeSnapshotOf("test-segment", "trace-segment", - tags = Map( - "trace" → "closing-segment-after-trace", - "category" → "test-category", - "library" → "test-library" - )) - } - - segment.finish() - - val afterFinishSegmentSnapshot = takeSnapshotOf("test-segment", "trace-segment", - tags = Map( - "trace" → "closing-segment-after-trace", - "category" → "test-category", - "library" → "test-library" - )) - - afterFinishSegmentSnapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) - } - - "record the elapsed time between a trace creation and finish with an error" in { - for (repetitions ← 1 to 10) { - Tracer.withContext(newContext("record-elapsed-time-with-error")) { - Tracer.currentContext.finishWithError(new RuntimeException("awesome-trace-error") with NoStackTrace) - } - } - - val snapshot = takeSnapshotOf("record-elapsed-time-with-error", "trace") - snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(10) - snapshot.counter("errors").get.count should be(10) - } - - "record the elapsed time for segments that finish with an error and that occur inside a given trace" in { - Tracer.withContext(newContext("trace-with-segments")) { - val segment = Tracer.currentContext.startSegment("test-segment-with-error", "test-category", "test-library") - segment.finishWithError(new RuntimeException("awesome-segment-error") with NoStackTrace) - Tracer.currentContext.finish() - } - - val snapshot = takeSnapshotOf("test-segment-with-error", "trace-segment", - tags = Map( - "trace" → "trace-with-segments", - "category" → "test-category", - "library" → "test-library" - )) - - snapshot.histogram("elapsed-time").get.numberOfMeasurements should be(1) - snapshot.counter("errors").get.count should be(1) - } - } -} \ No newline at end of file diff --git a/kamon-core/src/test/scala/kamon/metric/instrument/CounterSpec.scala b/kamon-core/src/test/scala/kamon/metric/instrument/CounterSpec.scala deleted file mode 100644 index d5d651e5..00000000 --- a/kamon-core/src/test/scala/kamon/metric/instrument/CounterSpec.scala +++ /dev/null @@ -1,79 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.nio.LongBuffer - -import org.scalatest.{Matchers, WordSpec} - -class CounterSpec extends WordSpec with Matchers { - - "a Counter" should { - "allow increment only operations" in new CounterFixture { - counter.increment() - counter.increment(10) - - intercept[UnsupportedOperationException] { - counter.increment(-10) - } - } - - "reset to zero when a snapshot is taken" in new CounterFixture { - counter.increment(100) - takeSnapshotFrom(counter).count should be(100) - takeSnapshotFrom(counter).count should be(0) - takeSnapshotFrom(counter).count should be(0) - - counter.increment(50) - takeSnapshotFrom(counter).count should be(50) - takeSnapshotFrom(counter).count should be(0) - } - - "produce a snapshot that can be merged with others" in new CounterFixture { - val counterA = Counter() - val counterB = Counter() - counterA.increment(100) - counterB.increment(200) - - val counterASnapshot = takeSnapshotFrom(counterA) - val counterBSnapshot = takeSnapshotFrom(counterB) - - counterASnapshot.merge(counterBSnapshot, collectionContext).count should be(300) - counterBSnapshot.merge(counterASnapshot, collectionContext).count should be(300) - } - - "produce a snapshot that can be scaled" in new CounterFixture { - counter.increment(100) - - val counterSnapshot = takeSnapshotFrom(counter) - - val scaledSnapshot = counterSnapshot.scale(Time.Milliseconds, Time.Microseconds) - scaledSnapshot.count should be(100000) - } - - } - - trait CounterFixture { - val counter = Counter() - - val collectionContext = new CollectionContext { - val buffer: LongBuffer = LongBuffer.allocate(1) - } - - def takeSnapshotFrom(counter: Counter): Counter.Snapshot = counter.collect(collectionContext) - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/instrument/GaugeSpec.scala b/kamon-core/src/test/scala/kamon/metric/instrument/GaugeSpec.scala deleted file mode 100644 index ec07d66c..00000000 --- a/kamon-core/src/test/scala/kamon/metric/instrument/GaugeSpec.scala +++ /dev/null @@ -1,79 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.util.concurrent.atomic.AtomicLong -import kamon.Kamon -import kamon.metric.instrument.Histogram.DynamicRange -import kamon.testkit.BaseKamonSpec -import scala.concurrent.duration._ - -class GaugeSpec extends BaseKamonSpec("gauge-spec") { - - "a Gauge" should { - "automatically record the current value using the configured refresh-interval" in new GaugeFixture { - val (numberOfValuesRecorded, gauge) = createGauge() - Thread.sleep(1.second.toMillis) - - numberOfValuesRecorded.get() should be(10L +- 1L) - gauge.cleanup - } - - "stop automatically recording after a call to cleanup" in new GaugeFixture { - val (numberOfValuesRecorded, gauge) = createGauge() - Thread.sleep(1.second.toMillis) - - gauge.cleanup - numberOfValuesRecorded.get() should be(10L +- 1L) - Thread.sleep(1.second.toMillis) - - numberOfValuesRecorded.get() should be(10L +- 1L) - } - - "produce a Histogram snapshot including all the recorded values" in new GaugeFixture { - val (numberOfValuesRecorded, gauge) = createGauge() - - Thread.sleep(1.second.toMillis) - gauge.cleanup - val snapshot = gauge.collect(Kamon.metrics.buildDefaultCollectionContext) - - snapshot.numberOfMeasurements should be(10L +- 1L) - snapshot.min should be(1) - snapshot.max should be(10L +- 1L) - } - - "not record the current value when doing a collection" in new GaugeFixture { - val (numberOfValuesRecorded, gauge) = createGauge(10 seconds) - - val snapshot = gauge.collect(Kamon.metrics.buildDefaultCollectionContext) - snapshot.numberOfMeasurements should be(0) - numberOfValuesRecorded.get() should be(0) - } - } - - trait GaugeFixture { - def createGauge(refreshInterval: FiniteDuration = 100 millis): (AtomicLong, Gauge) = { - val recordedValuesCounter = new AtomicLong(0) - val gauge = Gauge(DynamicRange(1, 100, 2), refreshInterval, Kamon.metrics.settings.refreshScheduler, { - () ⇒ recordedValuesCounter.addAndGet(1) - }) - - (recordedValuesCounter, gauge) - } - - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/instrument/HistogramSpec.scala b/kamon-core/src/test/scala/kamon/metric/instrument/HistogramSpec.scala deleted file mode 100644 index 9551c6ea..00000000 --- a/kamon-core/src/test/scala/kamon/metric/instrument/HistogramSpec.scala +++ /dev/null @@ -1,157 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.nio.LongBuffer - -import kamon.metric.instrument.Histogram.DynamicRange -import org.scalatest.{Matchers, WordSpec} - -import scala.util.Random - -class HistogramSpec extends WordSpec with Matchers { - - "a Histogram" should { - "allow recording values within the configured range" in new HistogramFixture { - histogram.record(1000) - histogram.record(5000, count = 100) - histogram.record(10000) - } - - "not fail when recording values higher than the highest trackable value" in new HistogramFixture { - histogram.record(Long.MaxValue) - } - - "reset all recorded levels to zero after a snapshot collection" in new HistogramFixture { - histogram.record(100) - histogram.record(200) - histogram.record(300) - - takeSnapshot().numberOfMeasurements should be(3) - takeSnapshot().numberOfMeasurements should be(0) - } - - "produce a snapshot" which { - "supports min, max, percentile, sum, numberOfMeasurements and recordsIterator operations" in new HistogramFixture { - histogram.record(100) - histogram.record(200, count = 200) - histogram.record(300) - histogram.record(900) - - val snapshot = takeSnapshot() - - snapshot.min should equal(100L +- 1L) - snapshot.max should equal(900L +- 9L) - snapshot.percentile(50.0D) should be(200) - snapshot.percentile(99.5D) should be(300) - snapshot.percentile(99.9D) should be(900) - snapshot.sum should be(41300) - snapshot.numberOfMeasurements should be(203) - - val records = snapshot.recordsIterator.map(r ⇒ r.level → r.count).toSeq - records.size should be(4) - records(0) should be(100 → 1) - records(1) should be(200 → 200) - records(2) should be(300 → 1) - records(3) should be(900 → 1) - } - - "can be scaled" in new HistogramFixture { - histogram.record(100) - histogram.record(200, count = 200) - histogram.record(300) - histogram.record(900) - - val snapshot = takeSnapshot().scale(Time.Seconds, Time.Milliseconds) - - snapshot.min should equal(100000L +- 1000L) - snapshot.max should equal(900000L +- 9000L) - snapshot.percentile(50.0D) should be(200000) - snapshot.percentile(99.5D) should be(300000) - snapshot.percentile(99.9D) should be(900000) - snapshot.sum should be(41300000) - snapshot.numberOfMeasurements should be(203) - - val records = snapshot.recordsIterator.map(r ⇒ r.level → r.count).toSeq - records.size should be(4) - records(0) should be(100000 → 1) - records(1) should be(200000 → 200) - records(2) should be(300000 → 1) - records(3) should be(900000 → 1) - } - - "can be merged with another snapshot" in new MultipleHistogramFixture { - val random = new Random(System.nanoTime()) - - for (repetitions ← 1 to 1000) { - // Put some values on A and Control - for (_ ← 1 to 1000) { - val newRecording = random.nextInt(100000) - controlHistogram.record(newRecording) - histogramA.record(newRecording) - } - - // Put some values on B and Control - for (_ ← 1 to 2000) { - val newRecording = random.nextInt(100000) - controlHistogram.record(newRecording) - histogramB.record(newRecording) - } - - val controlSnapshot = takeSnapshotFrom(controlHistogram) - val histogramASnapshot = takeSnapshotFrom(histogramA) - val histogramBSnapshot = takeSnapshotFrom(histogramB) - - assertEquals(controlSnapshot, histogramASnapshot.merge(histogramBSnapshot, collectionContext)) - assertEquals(controlSnapshot, histogramBSnapshot.merge(histogramASnapshot, collectionContext)) - } - } - } - } - - trait HistogramFixture { - val collectionContext = new 
CollectionContext { - val buffer: LongBuffer = LongBuffer.allocate(10000) - } - - val histogram = Histogram(DynamicRange(1, 100000, 2)) - - def takeSnapshot(): Histogram.Snapshot = histogram.collect(collectionContext) - } - - trait MultipleHistogramFixture { - val collectionContext = new CollectionContext { - val buffer: LongBuffer = LongBuffer.allocate(10000) - } - - val controlHistogram = Histogram(DynamicRange(1, 100000, 2)) - val histogramA = Histogram(DynamicRange(1, 100000, 2)) - val histogramB = Histogram(DynamicRange(1, 100000, 2)) - - def takeSnapshotFrom(histogram: Histogram): InstrumentSnapshot = histogram.collect(collectionContext) - - def assertEquals(left: InstrumentSnapshot, right: InstrumentSnapshot): Unit = { - val leftSnapshot = left.asInstanceOf[Histogram.Snapshot] - val rightSnapshot = right.asInstanceOf[Histogram.Snapshot] - - leftSnapshot.numberOfMeasurements should equal(rightSnapshot.numberOfMeasurements) - leftSnapshot.min should equal(rightSnapshot.min) - leftSnapshot.max should equal(rightSnapshot.max) - leftSnapshot.recordsIterator.toStream should contain theSameElementsAs (rightSnapshot.recordsIterator.toStream) - } - } -} diff --git a/kamon-core/src/test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala b/kamon-core/src/test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala deleted file mode 100644 index d007d4cd..00000000 --- a/kamon-core/src/test/scala/kamon/metric/instrument/MinMaxCounterSpec.scala +++ /dev/null @@ -1,140 +0,0 @@ -/* ========================================================================================= - * Copyright © 2013-2014 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.metric.instrument - -import java.nio.LongBuffer - -import akka.actor._ -import akka.testkit.TestProbe -import kamon.Kamon -import kamon.metric.instrument.Histogram.{DynamicRange, MutableRecord} -import kamon.testkit.BaseKamonSpec -import scala.concurrent.duration._ - -class MinMaxCounterSpec extends BaseKamonSpec("min-max-counter-spec") { - - "the MinMaxCounter" should { - "track ascending tendencies" in new MinMaxCounterFixture { - mmCounter.increment() - mmCounter.increment(3) - mmCounter.increment() - - val snapshot = collectCounterSnapshot() - - snapshot.min should be(0) - snapshot.max should be(5) - snapshot.recordsIterator.toStream should contain allOf ( - MutableRecord(0, 1), // min - MutableRecord(5, 2) - ) // max and current - } - - "track descending tendencies" in new MinMaxCounterFixture { - mmCounter.increment(5) - mmCounter.decrement() - mmCounter.decrement(3) - mmCounter.decrement() - - val snapshot = collectCounterSnapshot() - - snapshot.min should be(0) - snapshot.max should be(5) - snapshot.recordsIterator.toStream should contain allOf ( - MutableRecord(0, 2), // min and current - MutableRecord(5, 1) - ) // max - } - - "reset the min and max to the current value after taking a snapshot" in new MinMaxCounterFixture { - mmCounter.increment(5) - mmCounter.decrement(3) - - val firstSnapshot = collectCounterSnapshot() - - firstSnapshot.min should be(0) - firstSnapshot.max should be(5) - firstSnapshot.recordsIterator.toStream should contain allOf ( - MutableRecord(0, 1), // min - MutableRecord(2, 1), // current - MutableRecord(5, 1) - ) // max - - val secondSnapshot = collectCounterSnapshot() - - secondSnapshot.min should be(2) - secondSnapshot.max should be(2) - secondSnapshot.recordsIterator.toStream should contain( - MutableRecord(2, 3) - ) // min, max and current - } - - "report zero as the min and current values if the current value fell below zero" in new MinMaxCounterFixture { - mmCounter.decrement(3) - - val snapshot = collectCounterSnapshot() - - snapshot.min should be(0) - snapshot.max should be(0) - snapshot.recordsIterator.toStream should contain( - MutableRecord(0, 3) - ) // min, max and current (even though the current value really is -3) - } - - "never record values below zero in very busy situations" in new MinMaxCounterFixture { - val monitor = TestProbe() - val workers = for (_ ← 1 to 50) yield { - system.actorOf(Props(new MinMaxCounterUpdateActor(mmCounter, monitor.ref))) - } - - workers foreach (_ ! "increment") - for (refresh ← 1 to 1000) { - collectCounterSnapshot() - Thread.sleep(1) - } - - monitor.expectNoMsg() - workers foreach (_ ! PoisonPill) - } - } - - trait MinMaxCounterFixture { - val collectionContext = new CollectionContext { - val buffer: LongBuffer = LongBuffer.allocate(64) - } - - val mmCounter = MinMaxCounter(DynamicRange(1, 1000, 2), 1 hour, Kamon.metrics.settings.refreshScheduler) - mmCounter.cleanup // cancel the refresh schedule - - def collectCounterSnapshot(): Histogram.Snapshot = mmCounter.collect(collectionContext) - } -} - -class MinMaxCounterUpdateActor(mmc: MinMaxCounter, monitor: ActorRef) extends Actor { - val x = Array.ofDim[Int](4) - def receive = { - case "increment" ⇒ - mmc.increment() - self ! "decrement" - case "decrement" ⇒ - mmc.decrement() - self ! "increment" - try { - mmc.refreshValues() - } catch { - case _: IndexOutOfBoundsException ⇒ monitor ! 
"failed" - } - } -} \ No newline at end of file diff --git a/kamon-core/src/test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala b/kamon-core/src/test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala deleted file mode 100644 index 7133579e..00000000 --- a/kamon-core/src/test/scala/kamon/metric/instrument/UnitOfMeasurementSpec.scala +++ /dev/null @@ -1,98 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.metric.instrument - -import kamon.metric.instrument.UnitOfMeasurement.Unknown -import org.scalatest.{Matchers, WordSpec} - -class UnitOfMeasurementSpec extends WordSpec with Matchers { - - "Time unit" should { - "resolve Time Unit by valid name" in { - Time("s") should be(Time.Seconds) - Time("n") should be(Time.Nanoseconds) - Time("ms") should be(Time.Milliseconds) - Time("µs") should be(Time.Microseconds) - } - "fail to resolve Time Unit by invalid name" in { - val ex = intercept[IllegalArgumentException](Time("boo")) - ex.getMessage should be("Can't recognize time unit 'boo'") - } - "scale time properly" in { - val epsilon = 0.0001 - - Time.Nanoseconds.scale(Time.Nanoseconds)(1000000D) should be(1000000D +- epsilon) - Time.Nanoseconds.scale(Time.Microseconds)(1000000D) should be(1000D +- epsilon) - Time.Nanoseconds.scale(Time.Milliseconds)(1000000D) should be(1D +- epsilon) - Time.Nanoseconds.scale(Time.Seconds)(1000000D) should be(0.001D +- epsilon) - Time.Seconds.scale(Time.Nanoseconds)(1D) should be(1000000000D +- epsilon) - } - "allow scale only time" in { - intercept[IllegalArgumentException](Time.Nanoseconds.tryScale(Unknown)(100)) - .getMessage should be("Can't scale different types of units `time` and `unknown`") - intercept[IllegalArgumentException](Time.Nanoseconds.tryScale(Memory.Bytes)(100)) - .getMessage should be("Can't scale different types of units `time` and `bytes`") - val epsilon = 0.0001 - - Time.Nanoseconds.tryScale(Time.Nanoseconds)(100D) should be(100D +- epsilon) - } - } - - "Memory unit" should { - "resolve Memory Unit by valid name" in { - Memory("b") should be(Memory.Bytes) - Memory("Kb") should be(Memory.KiloBytes) - Memory("Mb") should be(Memory.MegaBytes) - Memory("Gb") should be(Memory.GigaBytes) - } - "fail to resolve Memory Unit by invalid name" in { - val ex = intercept[IllegalArgumentException](Memory("boo")) - ex.getMessage should be("Can't recognize memory unit 'boo'") - } - "scale memory properly" in { - val epsilon = 0.0001 - - Memory.Bytes.scale(Memory.Bytes)(1000000D) should be(1000000D +- epsilon) - Memory.Bytes.scale(Memory.KiloBytes)(1000000D) should be(976.5625D +- epsilon) - Memory.Bytes.scale(Memory.MegaBytes)(1000000D) should be(0.9536D +- epsilon) - Memory.Bytes.scale(Memory.GigaBytes)(1000000D) should be(9.3132E-4D +- epsilon) - 
Memory.MegaBytes.scale(Memory.Bytes)(1D) should be(1048576D +- epsilon) - } - "allow scaling only memory" in { - intercept[IllegalArgumentException](Memory.Bytes.tryScale(Unknown)(100)) - .getMessage should be("Can't scale different types of units `bytes` and `unknown`") - intercept[IllegalArgumentException](Memory.Bytes.tryScale(Time.Nanoseconds)(100)) - .getMessage should be("Can't scale different types of units `bytes` and `time`") - val epsilon = 0.0001 - - Memory.Bytes.tryScale(Memory.Bytes)(100D) should be(100D +- epsilon) - } - - } - - "Unknown unit" should { - "allow scaling only Unknown" in { - intercept[IllegalArgumentException](Unknown.tryScale(Memory.Bytes)(100)) - .getMessage should be("Can't scale different types of units `unknown` and `bytes`") - intercept[IllegalArgumentException](Unknown.tryScale(Time.Nanoseconds)(100)) - .getMessage should be("Can't scale different types of units `unknown` and `time`") - - Unknown.scale(Unknown)(100D) should be(100D) - } - - } -} diff --git a/kamon-core/src/test/scala/kamon/testkit/BaseKamonSpec.scala b/kamon-core/src/test/scala/kamon/testkit/BaseKamonSpec.scala deleted file mode 100644 index 995f15fa..00000000 --- a/kamon-core/src/test/scala/kamon/testkit/BaseKamonSpec.scala +++ /dev/null @@ -1,70 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.testkit - -import akka.actor.ActorSystem -import akka.testkit.{ImplicitSender, TestKitBase} -import com.typesafe.config.Config -import kamon.{ActorSystemTools, Kamon} -import kamon.metric.{Entity, EntitySnapshot, SubscriptionsDispatcher} -import kamon.trace.TraceContext -import kamon.util.LazyActorRef -import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} - -abstract class BaseKamonSpec(actorSystemName: String) extends TestKitBase with WordSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll { - lazy val collectionContext = Kamon.metrics.buildDefaultCollectionContext - override implicit lazy val system: ActorSystem = { - Kamon.start(mergedConfig) - ActorSystem(actorSystemName, mergedConfig) - } - - def config: Config = Kamon.config - - def mergedConfig: Config = config.withFallback(Kamon.config) - - def newContext(name: String): TraceContext = - Kamon.tracer.newContext(name) - - def newContext(name: String, token: String): TraceContext = - Kamon.tracer.newContext(name, Option(token)) - - def newContext(name: String, token: String, tags: Map[String, String]): TraceContext = - Kamon.tracer.newContext(name, Option(token), tags) - - def takeSnapshotOf(name: String, category: String): EntitySnapshot = { - val recorder = Kamon.metrics.find(name, category).get - recorder.collect(collectionContext) - } - - def takeSnapshotOf(name: String, category: String, tags: Map[String, String]): EntitySnapshot = { - val recorder = Kamon.metrics.find(Entity(name, category, tags)).get - recorder.collect(collectionContext) - } - - def flushSubscriptions(): Unit = { - val subscriptionsField = Kamon.metrics.getClass.getDeclaredField("_subscriptions") - subscriptionsField.setAccessible(true) - val subscriptions = subscriptionsField.get(Kamon.metrics).asInstanceOf[LazyActorRef] - - subscriptions.tell(SubscriptionsDispatcher.Tick) - } - - override protected def afterAll(): Unit = { - Kamon.shutdown() - ActorSystemTools.terminateActorSystem(system) - } -} diff --git a/kamon-core/src/test/scala/kamon/trace/SamplerSpec.scala b/kamon-core/src/test/scala/kamon/trace/SamplerSpec.scala deleted file mode 100644 index 88cdf116..00000000 --- a/kamon-core/src/test/scala/kamon/trace/SamplerSpec.scala +++ /dev/null @@ -1,76 +0,0 @@ -package kamon.trace - -import kamon.testkit.BaseKamonSpec -import kamon.util.NanoInterval - -class SamplerSpec extends BaseKamonSpec("sampler-spec") { - - "the Sampler" should { - "work as intended" when { - "using all mode" in { - val sampler = SampleAll - - sampler.shouldTrace should be(true) - - sampler.shouldReport(NanoInterval.default) should be(true) - } - - "using random mode" in { - val sampler = new RandomSampler(100) - - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - - sampler.shouldReport(NanoInterval.default) should be(true) - } - - "using ordered mode" in { - var sampler = new OrderedSampler(1) - - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(true) - - sampler = new OrderedSampler(2) - - sampler.shouldTrace should be(false) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(false) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(false) - sampler.shouldTrace should be(true) - - 
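- // An OrderedSampler(interval) accepts every interval-th trace, hence the alternating false/true results asserted above for interval = 2.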
sampler.shouldReport(NanoInterval.default) should be(true) - } - - "using threshold mode" in { - val sampler = new ThresholdSampler(new NanoInterval(10000000L)) - - sampler.shouldTrace should be(true) - - sampler.shouldReport(new NanoInterval(5000000L)) should be(false) - sampler.shouldReport(new NanoInterval(10000000L)) should be(true) - sampler.shouldReport(new NanoInterval(15000000L)) should be(true) - sampler.shouldReport(new NanoInterval(0L)) should be(false) - } - - "using clock mode" in { - val sampler = new ClockSampler(new NanoInterval(10000000L)) - - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(false) - Thread.sleep(1L) - sampler.shouldTrace should be(false) - Thread.sleep(10L) - sampler.shouldTrace should be(true) - sampler.shouldTrace should be(false) - - sampler.shouldReport(NanoInterval.default) should be(true) - } - } - } - -} diff --git a/kamon-core/src/test/scala/kamon/trace/SimpleTraceSpec.scala b/kamon-core/src/test/scala/kamon/trace/SimpleTraceSpec.scala deleted file mode 100644 index fe7ad053..00000000 --- a/kamon-core/src/test/scala/kamon/trace/SimpleTraceSpec.scala +++ /dev/null @@ -1,167 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2016 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. 
- * ========================================================================================= - */ - -package kamon.trace - -import kamon.Kamon -import kamon.testkit.BaseKamonSpec - -import scala.concurrent.duration._ -import scala.util.control.NoStackTrace - -class SimpleTraceSpec extends BaseKamonSpec("simple-trace-spec") { - - "the simple tracing" should { - "send a TraceInfo when the trace has finished and all segments are finished" in { - Kamon.tracer.subscribe(testActor) - - Tracer.withContext(newContext("simple-trace-without-segments")) { - Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish() - Tracer.currentContext.startSegment("segment-two", "test-segment", "test").finish() - Tracer.currentContext.finish() - } - - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-without-segments") - traceInfo.segments.size should be(2) - traceInfo.segments.find(_.name == "segment-one") should be('defined) - traceInfo.segments.find(_.name == "segment-two") should be('defined) - } - - "send a TraceInfo when the trace has finished with error and all segments are finished" in { - Kamon.tracer.subscribe(testActor) - - Tracer.withContext(newContext("simple-trace-with-error-and-without-segments")) { - Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish() - Tracer.currentContext.startSegment("segment-two", "test-segment", "test").finish() - Tracer.currentContext.finishWithError(TraceException("awesome-trace-error")) - } - - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-with-error-and-without-segments") - traceInfo.status should be(Status.FinishedWithError) - traceInfo.segments.size should be(2) - traceInfo.segments.find(_.name == "segment-one") should be('defined) - traceInfo.segments.find(_.name == "segment-two") should be('defined) - } - - "send a TraceInfo when the trace has finished with error and all segments are finished with error" in { - Kamon.tracer.subscribe(testActor) - - Tracer.withContext(newContext("simple-trace-with-error-and-without-segments")) { - Tracer.currentContext.startSegment("segment-one-finished-with-error", "test-segment", "test").finishWithError(SegmentException("awesome-segment-exception")) - Tracer.currentContext.startSegment("segment-two-finished-with-error", "test-segment", "test").finishWithError(SegmentException("awesome-segment-exception")) - Tracer.currentContext.finishWithError(TraceException("awesome-trace-error")) - } - - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-with-error-and-without-segments") - traceInfo.status should be(Status.FinishedWithError) - traceInfo.segments.size should be(2) - - val segmentOne = traceInfo.segments.find(_.name == "segment-one-finished-with-error") - val segmentTwo = traceInfo.segments.find(_.name == "segment-two-finished-with-error") - - segmentOne.get.status should be(Status.FinishedWithError) - segmentTwo.get.status should be(Status.FinishedWithError) - } - - "send a TraceInfo when the trace has finished and all segments are finished and both contains tags" in { - Kamon.tracer.subscribe(testActor) - - Tracer.withContext(newContext("simple-trace-without-segments", "awesome-token", Map("environment" → "production"))) { - Tracer.currentContext.startSegment("segment-one", "test-segment", "test", Map("segment-one-info" → "info")).finish() - 
Tracer.currentContext.startSegment("segment-two", "test-segment", "test", Map("segment-two-info" → "info")).finish() - Tracer.currentContext.finish() - } - - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-without-segments") - traceInfo.tags should be(Map("environment" → "production")) - traceInfo.segments.size should be(2) - - val segmentOne = traceInfo.segments.find(_.name == "segment-one") - val segmentTwo = traceInfo.segments.find(_.name == "segment-two") - - segmentOne.get.tags should be(Map("segment-one-info" → "info")) - segmentTwo.get.tags should be(Map("segment-two-info" → "info")) - } - - "send a TraceInfo when the trace has finished and all segments are finished and contains added tag" in { - Kamon.tracer.subscribe(testActor) - - Tracer.withContext(newContext("simple-trace-without-segments", "awesome-token")) { - Tracer.currentContext.addTag("environment", "production") - val segmentOne = Tracer.currentContext.startSegment("segment-one", "test-segment", "test") - segmentOne.addTag("segment-one-info", "info") - segmentOne.finish() - val segmentTwo = Tracer.currentContext.startSegment("segment-two", "test-segment", "test") - segmentTwo.addTag("segment-two-info", "info") - segmentTwo.finish() - Tracer.currentContext.finish() - } - - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-without-segments") - traceInfo.tags should be(Map("environment" → "production")) - traceInfo.segments.size should be(2) - - val segmentOne = traceInfo.segments.find(_.name == "segment-one") - val segmentTwo = traceInfo.segments.find(_.name == "segment-two") - - segmentOne.get.tags should be(Map("segment-one-info" → "info")) - segmentTwo.get.tags should be(Map("segment-two-info" → "info")) - } - - "incubate the tracing context if there are open segments after finishing" in { - Kamon.tracer.subscribe(testActor) - - val secondSegment = Tracer.withContext(newContext("simple-trace-without-segments")) { - Tracer.currentContext.startSegment("segment-one", "test-segment", "test").finish() - val segment = Tracer.currentContext.startSegment("segment-two", "test-segment", "test") - Tracer.currentContext.finish() - segment - } - - expectNoMsg(2 seconds) - secondSegment.finish() - - within(10 seconds) { - val traceInfo = expectMsgType[TraceInfo] - Kamon.tracer.unsubscribe(testActor) - - traceInfo.name should be("simple-trace-without-segments") - traceInfo.segments.size should be(2) - traceInfo.segments.find(_.name == "segment-one") should be('defined) - traceInfo.segments.find(_.name == "segment-two") should be('defined) - } - } - - } -} - -case class TraceException(message: String) extends RuntimeException(message) with NoStackTrace -case class SegmentException(message: String) extends RuntimeException(message) with NoStackTrace \ No newline at end of file diff --git a/kamon-core/src/test/scala/kamon/trace/TraceContextManipulationSpec.scala b/kamon-core/src/test/scala/kamon/trace/TraceContextManipulationSpec.scala deleted file mode 100644 index d817186a..00000000 --- a/kamon-core/src/test/scala/kamon/trace/TraceContextManipulationSpec.scala +++ /dev/null @@ -1,113 +0,0 @@ -/* - * ========================================================================================= - * Copyright © 2013-2015 the kamon project - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, - * either express or implied. See the License for the specific language governing permissions - * and limitations under the License. - * ========================================================================================= - */ - -package kamon.trace - -import kamon.testkit.BaseKamonSpec - -class TraceContextManipulationSpec extends BaseKamonSpec("trace-metrics-spec") { - - "the TraceContext api" should { - "allow starting a trace within a specified block of code, and only within that block of code" in { - val createdContext = Tracer.withContext(newContext("start-context")) { - Tracer.currentContext should not be empty - Tracer.currentContext - } - - Tracer.currentContext shouldBe empty - createdContext.name shouldBe ("start-context") - } - - "allow starting a trace within a specified block of code, providing a trace-token and only within that block of code" in { - val createdContext = Tracer.withContext(newContext("start-context-with-token", "token-1")) { - Tracer.currentContext should not be empty - Tracer.currentContext - } - - Tracer.currentContext shouldBe empty - createdContext.name shouldBe ("start-context-with-token") - createdContext.token should be("token-1") - } - - "allow providing a TraceContext and make it available within a block of code" in { - val createdContext = newContext("manually-provided-trace-context") - - Tracer.currentContext shouldBe empty - Tracer.withContext(createdContext) { - Tracer.currentContext should be(createdContext) - } - - Tracer.currentContext shouldBe empty - } - - "allow renaming a trace" in { - val createdContext = Tracer.withContext(newContext("trace-before-rename")) { - Tracer.currentContext.rename("renamed-trace") - Tracer.currentContext - } - - Tracer.currentContext shouldBe empty - createdContext.name shouldBe "renamed-trace" - } - - "allow tagging and untagging a trace" in { - val createdContext = Tracer.withContext(newContext("trace-before-rename")) { - Tracer.currentContext.addTag("trace-tag", "tag-1") - Tracer.currentContext - } - - Tracer.currentContext shouldBe empty - createdContext.tags shouldBe Map("trace-tag" → "tag-1") - - createdContext.removeTag("trace-tag", "tag-1") - - createdContext.tags shouldBe Map.empty - } - - "allow creating a segment within a trace" in { - val createdContext = Tracer.withContext(newContext("trace-with-segments")) { - Tracer.currentContext.startSegment("segment-1", "segment-1-category", "segment-library") - Tracer.currentContext - } - - Tracer.currentContext shouldBe empty - createdContext.name shouldBe "trace-with-segments" - } - - "allow renaming a segment" in { - Tracer.withContext(newContext("trace-with-renamed-segment")) { - val segment = Tracer.currentContext.startSegment("original-segment-name", "segment-label", "segment-library") - segment.name should be("original-segment-name") - - segment.rename("new-segment-name") - segment.name should be("new-segment-name") - } - } - - "allow tagging and untagging a segment" in { - Tracer.withContext(newContext("trace-with-renamed-segment")) { - val segment = Tracer.currentContext.startSegment("segment-name", "segment-label", "segment-library") - segment.tags should be(Map.empty) - - segment.addTag("segment-tag", "tag-1") - segment.tags should be(Map("segment-tag" → "tag-1")) - 
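- // Note that removeTag takes both the key and the value of the tag being removed.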
-        segment.removeTag("segment-tag", "tag-1")
-        segment.tags should be(Map.empty)
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/kamon-core/src/test/scala/kamon/trace/TraceLocalSpec.scala b/kamon-core/src/test/scala/kamon/trace/TraceLocalSpec.scala
deleted file mode 100644
index 41d5bc83..00000000
--- a/kamon-core/src/test/scala/kamon/trace/TraceLocalSpec.scala
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * =========================================================================================
- * Copyright © 2013-2014 the kamon project
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- * =========================================================================================
- */
-
-package kamon.trace
-
-import kamon.testkit.BaseKamonSpec
-import kamon.trace.TraceLocal.AvailableToMdc
-import kamon.trace.logging.MdcKeysSupport
-import kamon.util.Supplier
-import org.scalatest.concurrent.PatienceConfiguration
-import org.scalatest.OptionValues
-import org.slf4j.MDC
-
-class TraceLocalSpec extends BaseKamonSpec("trace-local-spec") with PatienceConfiguration with OptionValues with MdcKeysSupport {
-  val SampleTraceLocalKeyAvailableToMDC = AvailableToMdc("someKey")
-
-  object SampleTraceLocalKey extends TraceLocal.TraceLocalKey[String]
-
-  "the TraceLocal storage" should {
-    "allow storing and retrieving values" in {
-      Tracer.withContext(newContext("store-and-retrieve-trace-local")) {
-        val testString = "Hello World"
-
-        TraceLocal.store(SampleTraceLocalKey)(testString)
-        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
-      }
-    }
-
-    "return None when retrieving a non-existent key" in {
-      Tracer.withContext(newContext("non-existent-key")) {
-        TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
-      }
-    }
-
-    "throw an exception when trying to get a non-existent key" in {
-      Tracer.withContext(newContext("non-existent-key")) {
-        intercept[NoSuchElementException] {
-          TraceLocal.get(SampleTraceLocalKey)
-        }
-      }
-    }
-
-    "return the given value when retrieving a non-existent key" in {
-      Tracer.withContext(newContext("non-existent-key")) {
-        TraceLocal.getOrElse(SampleTraceLocalKey, new Supplier[String] { def get = "optionalValue" }) should equal("optionalValue")
-      }
-    }
-
-    "return None when retrieving a key without a current TraceContext" in {
-      TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
-    }
-
-    "be attached to the TraceContext when it is propagated" in {
-      val testString = "Hello World"
-      val testContext = Tracer.withContext(newContext("manually-propagated-trace-local")) {
-        TraceLocal.store(SampleTraceLocalKey)(testString)
-        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
-        Tracer.currentContext
-      }
-
-      // No TraceLocal should be available here.
-      TraceLocal.retrieve(SampleTraceLocalKey) should equal(None)
-
-      Tracer.withContext(testContext) {
-        TraceLocal.retrieve(SampleTraceLocalKey).value should equal(testString)
-      }
-    }
-
-    "allow retrieving a value from the MDC when a key was created with AvailableToMdc(someKey)" in {
-      Tracer.withContext(newContext("store-and-retrieve-trace-local-and-copy-to-mdc")) {
-        val testString = "Hello MDC"
-
-        TraceLocal.store(SampleTraceLocalKeyAvailableToMDC)(testString)
-        TraceLocal.retrieve(SampleTraceLocalKeyAvailableToMDC).value should equal(testString)
-
-        withMdc {
-          MDC.get("someKey") should equal(testString)
-        }
-      }
-    }
-
-    "allow retrieving a value from the MDC when a key was created with AvailableToMdc.storeForMdc(String, String)" in {
-      Tracer.withContext(newContext("store-and-retrieve-trace-local-and-copy-to-mdc")) {
-        val testString = "Hello MDC"
-
-        TraceLocal.storeForMdc("someKey", testString)
-        TraceLocal.retrieve(SampleTraceLocalKeyAvailableToMDC).value should equal(testString)
-
-        withMdc {
-          MDC.get("someKey") should equal(testString)
-        }
-      }
-    }
-  }
-}
diff --git a/kamon-core/src/test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala b/kamon-core/src/test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala
deleted file mode 100644
index 39374e79..00000000
--- a/kamon-core/src/test/scala/kamon/trace/logging/MdcKeysSupportSpec.scala
+++ /dev/null
@@ -1,44 +0,0 @@
-package kamon.trace.logging
-
-import kamon.testkit.BaseKamonSpec
-import kamon.trace.{EmptyTraceContext, Tracer}
-import org.slf4j.MDC
-
-class MdcKeysSupportSpec extends BaseKamonSpec("mdc-keys-support-spec") {
-
-  "Running code with MDC support" should {
-    "add nothing to the MDC" when {
-      "the trace context is empty" in {
-        // Given an empty trace context.
-        Tracer.withContext(EmptyTraceContext) {
-          // When some code is executed with MDC support.
-          MdcKeysSupport.withMdc {
-            // Then the MDC should not contain the trace token.
-            Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(None)
-            // Or the trace name.
-            Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(None)
-          }
-        }
-      }
-    }
-    "add the trace token and name to the MDC" when {
-      "the trace context is not empty" in {
-        // Given a trace context.
-        Tracer.withNewContext("name", Some("token")) {
-          // When some code is executed with MDC support.
-          MdcKeysSupport.withMdc {
-            // Then the MDC should contain the trace token.
-            Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(Some("token"))
-            // And the trace name.
-            Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(Some("name"))
-          }
-
-          // Then, after the code is executed, the MDC should have been cleared.
-          Option(MDC.get(MdcKeysSupport.traceTokenKey)) should be(None)
-          Option(MDC.get(MdcKeysSupport.traceNameKey)) should be(None)
-        }
-      }
-    }
-  }
-
-}
diff --git a/kamon-core/src/test/scala/kamon/util/GlobPathFilterSpec.scala b/kamon-core/src/test/scala/kamon/util/GlobPathFilterSpec.scala
deleted file mode 100644
index 7d585087..00000000
--- a/kamon-core/src/test/scala/kamon/util/GlobPathFilterSpec.scala
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * =========================================================================================
- * Copyright © 2013-2014 the kamon project
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- * =========================================================================================
- */
-
-package kamon.util
-
-import org.scalatest.{Matchers, WordSpecLike}
-
-class GlobPathFilterSpec extends WordSpecLike with Matchers {
-  "The GlobPathFilter" should {
-
-    "match a single expression" in {
-      val filter = new GlobPathFilter("/user/actor")
-
-      filter.accept("/user/actor") shouldBe true
-
-      filter.accept("/user/actor/something") shouldBe false
-      filter.accept("/user/actor/somethingElse") shouldBe false
-    }
-
-    "match all expressions in the same level" in {
-      val filter = new GlobPathFilter("/user/*")
-
-      filter.accept("/user/actor") shouldBe true
-      filter.accept("/user/otherActor") shouldBe true
-
-      filter.accept("/user/something/actor") shouldBe false
-      filter.accept("/user/something/otherActor") shouldBe false
-    }
-
-    "match all expressions across all levels" in {
-      val filter = new GlobPathFilter("**")
-
-      filter.accept("GET: /ping") shouldBe true
-      filter.accept("GET: /ping/pong") shouldBe true
-    }
-
-    "match all expressions, crossing path boundaries" in {
-      val filter = new GlobPathFilter("/user/actor-**")
-
-      filter.accept("/user/actor-") shouldBe true
-      filter.accept("/user/actor-one") shouldBe true
-      filter.accept("/user/actor-one/other") shouldBe true
-
-      filter.accept("/user/something/actor") shouldBe false
-      filter.accept("/user/something/otherActor") shouldBe false
-    }
-
-    "match exactly one character" in {
-      val filter = new GlobPathFilter("/user/actor-?")
-
-      filter.accept("/user/actor-1") shouldBe true
-      filter.accept("/user/actor-2") shouldBe true
-      filter.accept("/user/actor-3") shouldBe true
-
-      filter.accept("/user/actor-one") shouldBe false
-      filter.accept("/user/actor-two") shouldBe false
-      filter.accept("/user/actor-tree") shouldBe false
-    }
-  }
-}
diff --git a/kamon-core/src/test/scala/kamon/util/NeedToScaleSpec.scala b/kamon-core/src/test/scala/kamon/util/NeedToScaleSpec.scala
deleted file mode 100644
index cba6ad98..00000000
--- a/kamon-core/src/test/scala/kamon/util/NeedToScaleSpec.scala
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * =========================================================================================
- * Copyright © 2013-2015 the kamon project
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- * =========================================================================================
- */
-
-package kamon.util
-
-import com.typesafe.config.ConfigFactory
-import kamon.metric.instrument.{Memory, Time}
-import org.scalatest.{Matchers, WordSpec}
-
-class NeedToScaleSpec extends WordSpec with Matchers {
-
-  "NeedToScale" should {
-    "extract time unit to scale to from config" in {
-      val config = ConfigFactory.parseString(
-        """
-          |time-units = "ms"
-        """.stripMargin
-      )
-
-      config match {
-        case NeedToScale(timeUnits, memoryUnits) ⇒
-          timeUnits should be(Some(Time.Milliseconds))
-          memoryUnits should be(None)
-      }
-    }
-    "extract memory unit to scale to from config" in {
-      val config = ConfigFactory.parseString(
-        """
-          |memory-units = "kb"
-        """.stripMargin
-      )
-
-      config match {
-        case NeedToScale(timeUnits, memoryUnits) ⇒
-          timeUnits should be(None)
-          memoryUnits should be(Some(Memory.KiloBytes))
-      }
-    }
-    "extract nothing if config has no proper keys" in {
-      val config = ConfigFactory.parseString(
-        """
-          |some-other-key = "value"
-        """.stripMargin
-      )
-
-      config match {
-        case NeedToScale(timeUnits, memoryUnits) ⇒
-          fail("Should not match")
-        case _ ⇒
-      }
-    }
-  }
-
-}
diff --git a/kamon-core/src/test/scala/kamon/util/RegexPathFilterSpec.scala b/kamon-core/src/test/scala/kamon/util/RegexPathFilterSpec.scala
deleted file mode 100644
index a2cc8629..00000000
--- a/kamon-core/src/test/scala/kamon/util/RegexPathFilterSpec.scala
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * =========================================================================================
- * Copyright © 2013-2015 the kamon project
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- * =========================================================================================
- */
-
-package kamon.util
-
-import org.scalatest.{Matchers, WordSpecLike}
-
-class RegexPathFilterSpec extends WordSpecLike with Matchers {
-  "The RegexPathFilter" should {
-
-    "match a single expression" in {
-      val filter = new RegexPathFilter("/user/actor")
-
-      filter.accept("/user/actor") shouldBe true
-
-      filter.accept("/user/actor/something") shouldBe false
-      filter.accept("/user/actor/somethingElse") shouldBe false
-    }
-
-    "match arbitrary expressions ending with a wildcard" in {
-      val filter = new RegexPathFilter("/user/.*")
-
-      filter.accept("/user/actor") shouldBe true
-      filter.accept("/user/otherActor") shouldBe true
-      filter.accept("/user/something/actor") shouldBe true
-      filter.accept("/user/something/otherActor") shouldBe true
-
-      filter.accept("/otheruser/actor") shouldBe false
-      filter.accept("/otheruser/otherActor") shouldBe false
-      filter.accept("/otheruser/something/actor") shouldBe false
-      filter.accept("/otheruser/something/otherActor") shouldBe false
-    }
-
-    "match numbers" in {
-      val filter = new RegexPathFilter("/user/actor-\\d")
-
-      filter.accept("/user/actor-1") shouldBe true
-      filter.accept("/user/actor-2") shouldBe true
-      filter.accept("/user/actor-3") shouldBe true
-
-      filter.accept("/user/actor-one") shouldBe false
-      filter.accept("/user/actor-two") shouldBe false
-      filter.accept("/user/actor-tree") shouldBe false
-    }
-  }
-}
diff --git a/kamon-core/src/test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala b/kamon-core/src/test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala
deleted file mode 100644
index 4e5394f8..00000000
--- a/kamon-core/src/test/scala/kamon/util/executors/ExecutorServiceMetricsSpec.scala
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * =========================================================================================
- * Copyright © 2013-2015 the kamon project
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
- * except in compliance with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific language governing permissions
- * and limitations under the License.
- * =========================================================================================
- */
-
-package kamon.util.executors
-
-import java.util.concurrent.Executors
-
-import kamon.Kamon
-import kamon.metric.{Entity, EntityRecorder}
-import kamon.testkit.BaseKamonSpec
-
-class ExecutorServiceMetricsSpec extends BaseKamonSpec("executor-service-metrics-spec") {
-
-  "the ExecutorServiceMetrics" should {
-    "register a SingleThreadPool, collect its metrics and remove it" in {
-      val singleThreadPoolExecutor = Executors.newSingleThreadExecutor()
-      val singleThreadPoolExecutorEntity = ExecutorServiceMetrics.register("single-thread-pool", singleThreadPoolExecutor)
-      findExecutorRecorder(singleThreadPoolExecutorEntity) should not be empty
-
-      ExecutorServiceMetrics.remove(singleThreadPoolExecutorEntity)
-      findExecutorRecorder(singleThreadPoolExecutorEntity) should be(empty)
-    }
-
-    "register a ThreadPoolExecutor, collect its metrics and remove it" in {
-      val threadPoolExecutor = Executors.newCachedThreadPool()
-      val threadPoolExecutorEntity = ExecutorServiceMetrics.register("thread-pool-executor", threadPoolExecutor)
-      findExecutorRecorder(threadPoolExecutorEntity) should not be empty
-
-      ExecutorServiceMetrics.remove(threadPoolExecutorEntity)
-      findExecutorRecorder(threadPoolExecutorEntity) should be(empty)
-    }
-
-    "register a ScheduledThreadPoolExecutor, collect its metrics and remove it" in {
-      val scheduledThreadPoolExecutor = Executors.newSingleThreadScheduledExecutor()
-      val scheduledThreadPoolEntity = ExecutorServiceMetrics.register("scheduled-thread-pool-executor", scheduledThreadPoolExecutor)
-      findExecutorRecorder(scheduledThreadPoolEntity) should not be empty
-
-      ExecutorServiceMetrics.remove(scheduledThreadPoolEntity)
-      findExecutorRecorder(scheduledThreadPoolEntity) should be(empty)
-    }
-
-    "register a Java ForkJoinPool, collect its metrics and remove it" in {
-      val javaForkJoinPool = Executors.newWorkStealingPool()
-      val javaForkJoinPoolEntity = ExecutorServiceMetrics.register("java-fork-join-pool", javaForkJoinPool)
-      findExecutorRecorder(javaForkJoinPoolEntity) should not be empty
-
-      ExecutorServiceMetrics.remove(javaForkJoinPoolEntity)
-      findExecutorRecorder(javaForkJoinPoolEntity) should be(empty)
-    }
-
-    "register a Scala ForkJoinPool, collect its metrics and remove it" in {
-      val scalaForkJoinPool = new scala.concurrent.forkjoin.ForkJoinPool()
-      val scalaForkJoinPoolEntity = ExecutorServiceMetrics.register("scala-fork-join-pool", scalaForkJoinPool)
-      findExecutorRecorder(scalaForkJoinPoolEntity) should not be empty
-
-      ExecutorServiceMetrics.remove(scalaForkJoinPoolEntity)
-      findExecutorRecorder(scalaForkJoinPoolEntity) should be(empty)
-    }
-
-    def findExecutorRecorder(entity: Entity): Option[EntityRecorder] = Kamon.metrics.find(entity)
-  }
-}
-- 
cgit v1.2.3