diff --git a/reporters/kamon-opentelemetry/src/main/resources/reference.conf b/reporters/kamon-opentelemetry/src/main/resources/reference.conf
index 22e1aeada..d16a6440e 100644
--- a/reporters/kamon-opentelemetry/src/main/resources/reference.conf
+++ b/reporters/kamon-opentelemetry/src/main/resources/reference.conf
@@ -70,54 +70,22 @@ kamon.otel {
   }
 
   explicit-histo-boundaries {
-    default-boundaries = [
-      10,
-      30,
-      100,
-      300,
-      1000,
-      3000,
-      10000,
-      30000,
-      100000
+    # Same as defaults from https://opentelemetry.io/docs/specs/otel/metrics/sdk/#explicit-bucket-histogram-aggregation
+    default-buckets = [
+      0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000
     ]
 
-    time-boundaries = [
-      0.005,
-      0.01,
-      0.025,
-      0.05,
-      0.075,
-      0.1,
-      0.25,
-      0.5,
-      0.75,
-      1,
-      2.5,
-      5,
-      7.5,
-      10
+    # The following are the same as the prometheus reporter's default values
+    time-buckets = [
+      0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10
     ]
 
-    information-boundaries = [
-      512,
-      1024,
-      2048,
-      4096,
-      16384,
-      65536,
-      524288,
-      1048576
+    information-buckets = [
+      512, 1024, 2048, 4096, 16384, 65536, 524288, 1048576
     ]
 
-    percentage-boundaries = [
-      20,
-      40,
-      60,
-      70,
-      80,
-      90,
-      95
+    percentage-buckets = [
+      20, 40, 60, 70, 80, 90, 95
     ]
 
     # Per metric overrides are possible by specifying the metric name and the histogram buckets here
@@ -126,6 +94,22 @@ kamon.otel {
       // "akka.actor.processing-time" = [0.1, 1.0, 10.0]
     }
   }
+
+  exponential-histo-boundaries {
+    default-bucket-count = 160
+
+    time-bucket-count = 160
+
+    information-bucket-count = 160
+
+    percentage-bucket-count = 100
+
+    # Per metric overrides are possible by specifying the metric name and the bucket count here
+    custom {
+      // example:
+      // "akka.actor.processing-time" = 3
+    }
+  }
 }
 
 # Arbitrary key-value pairs that further identify the environment where this service instance is running.
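Note on the reworked config above: explicit histograms are now configured with bucket lists while exponential histograms take a bucket count, and both support per-metric `custom` overrides. A minimal sketch of how a downstream application could exercise both override styles (the metric name `span.processing-time` is illustrative, not something this patch configures):

```scala
import com.typesafe.config.ConfigFactory

// Hypothetical user-side overrides mirroring the reference.conf keys above:
// explicit histograms take a boundary list, exponential ones a bucket count.
val userOverrides = ConfigFactory.parseString(
  """kamon.otel {
    |  explicit-histo-boundaries.custom {
    |    "span.processing-time" = [0.1, 1.0, 10.0]
    |  }
    |  exponential-histo-boundaries.custom {
    |    "span.processing-time" = 3
    |  }
    |}""".stripMargin)
```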
diff --git a/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/MetricsConverter.scala b/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/MetricsConverter.scala
index 502cb3210..e5d3a076f 100644
--- a/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/MetricsConverter.scala
+++ b/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/MetricsConverter.scala
@@ -23,15 +23,17 @@ import io.opentelemetry.sdk.resources.Resource
 import kamon.metric.Instrument.Snapshot
 import kamon.metric.{Distribution, MeasurementUnit, MetricSnapshot, PeriodSnapshot}
 import kamon.otel.HistogramFormat.{Explicit, Exponential, HistogramFormat}
+import kamon.otel.MetricsConverter.{ExplBucketFn, ExpoBucketFn}
 import org.slf4j.LoggerFactory
 
 import java.lang.{Double => JDouble, Long => JLong}
 import java.time.Instant
-import java.util.{Collection => JCollection, ArrayList => JArrayList}
+import java.util.{ArrayList => JArrayList, Collection => JCollection}
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
 
-class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, from: Instant, to: Instant, bucketConfig: (String, MeasurementUnit) => Seq[JDouble]) {
+class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, from: Instant, to: Instant,
+                                   explBucketConfig: ExplBucketFn, expoBucketConfig: ExpoBucketFn) {
   private val maxDouble: JDouble = JDouble.valueOf(JDouble.MAX_VALUE)
   private val logger = LoggerFactory.getLogger(getClass)
   private val fromNs = from.toEpochMilli * 1000000
@@ -55,7 +57,7 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
       toString(gauge.settings.unit),
       toGaugeData(gauge.instruments))
 
-  private def toExplicitHistogramDatum(bucketConfiguration: Seq[JDouble])(s: Snapshot[Distribution]): HistogramPointData = {
+  private def getExplBucketCounts(bucketConfiguration: Seq[JDouble])(s: Snapshot[Distribution]) = {
     val counts = ArrayBuffer.newBuilder[JLong]
     val boundaryIterator: Iterator[JDouble] = (bucketConfiguration :+ maxDouble).iterator
     var nextBoundary = boundaryIterator.next()
@@ -74,6 +76,11 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
       inBucketCount = 0L
     }
     counts += inBucketCount
+    counts
+  }
+
+  private def toExplicitHistogramDatum(bucketConfiguration: Seq[JDouble])(s: Snapshot[Distribution]): HistogramPointData = {
+    val counts = getExplBucketCounts(bucketConfiguration)(s)
     ImmutableHistogramPointData.create(
       fromNs,
       toNs,
@@ -93,7 +100,7 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
   }
 
   def convertExplicitHistogram(histogram: MetricSnapshot.Distributions): Option[MetricData] = {
-    val bucketConfiguration = bucketConfig(histogram.name, histogram.settings.unit)
+    val bucketConfiguration = explBucketConfig(histogram.name, histogram.settings.unit)
     toExplicitHistogramData(bucketConfiguration, histogram.instruments).map(d =>
       ImmutableMetricData.createDoubleHistogram(
         resource,
@@ -104,6 +111,30 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
         d))
   }
 
+  private def getExpoBucketCounts(maxBucketCount: Int)(s: Snapshot[Distribution]) = {
+    val min = s.value.min
+    val max = s.value.max
+    val counts = ArrayBuffer.newBuilder[JLong]
+//    val boundaryIterator: Iterator[JDouble] = (bucketConfiguration :+ maxDouble).iterator
+//    var nextBoundary = boundaryIterator.next()
+//    var inBucketCount = 0L
+//    for (el <- s.value.bucketsIterator) {
+//      while (el.value > nextBoundary) {
+//        nextBoundary = boundaryIterator.next()
+//        counts += inBucketCount
+//        inBucketCount = 0L
+//      }
+//      inBucketCount += el.frequency
+//    }
+//    while (boundaryIterator.hasNext) {
+//      counts += inBucketCount
+//      boundaryIterator.next()
+//      inBucketCount = 0L
+//    }
+//    counts += inBucketCount
+    counts
+  }
+
   private def toExponentialHistogramData(distributions: Seq[Snapshot[Distribution]]): Option[ExponentialHistogramData] =
     distributions.filter(_.value.buckets.nonEmpty) match {
       case Nil => None
@@ -113,9 +144,9 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
           case zigZag: Distribution.ZigZagCounts =>
             logger.error("Unable to construct exponential histogram data - Unimplemented")
             None
-//            Some(ExponentialHistogramPointData.create(
-//              ???, zigZag.sum, ???, ???, ???, fromNs, toNs, SpanConverter.toAttributes(s.tags), new JArrayList[DoubleExemplarData]()
-//            ))
+            Some(ExponentialHistogramPointData.create(
+              ???, zigZag.sum, ???, ???, ???, fromNs, toNs, SpanConverter.toAttributes(s.tags), new JArrayList[DoubleExemplarData]()
+            ))
       case _ =>
         logger.error("Unable to construct exponential histogram data - only ZigZagCounts distribution can be converted")
         None
@@ -161,8 +192,11 @@ class WithResourceMetricsConverter(resource: Resource, kamonVersion: String, fro
  * Converts Kamon metrics to OpenTelemetry [[MetricData]]s
  */
 private[otel] object MetricsConverter {
-  def convert(resource: Resource, kamonVersion: String, histogramFormat: HistogramFormat, bucketConfig: (String, MeasurementUnit) => Seq[JDouble])(metrics: PeriodSnapshot): JCollection[MetricData] = {
-    val converter = new WithResourceMetricsConverter(resource, kamonVersion, metrics.from, metrics.to, bucketConfig)
+  type ExplBucketFn = (String, MeasurementUnit) => Seq[JDouble]
+  type ExpoBucketFn = (String, MeasurementUnit) => Int
+  def convert(resource: Resource, kamonVersion: String, histogramFormat: HistogramFormat,
+              explicitBucketConfig: ExplBucketFn, exponentialBucketConfig: ExpoBucketFn)(metrics: PeriodSnapshot): JCollection[MetricData] = {
+    val converter = new WithResourceMetricsConverter(resource, kamonVersion, metrics.from, metrics.to, explicitBucketConfig, exponentialBucketConfig)
     val gauges = metrics.gauges.filter(_.instruments.nonEmpty).map(converter.convertGauge)
     val histograms = (metrics.histograms ++ metrics.timers ++ metrics.rangeSamplers).filter(_.instruments.nonEmpty)
       .flatMap(converter.convertHistogram(histogramFormat))
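`getExpoBucketCounts` is still a stub at this point (it returns an empty builder, with the explicit-bucket logic left commented out as a reference), so exponential conversion cannot produce real data yet. For orientation, the OTLP exponential histogram it must eventually feed uses base `2^(2^-scale)`, and a positive value `v` belongs in bucket `ceil(log_base(v)) - 1`. A self-contained sketch of that index function per the OpenTelemetry spec — not code from this patch, and Kamon's ZigZagCounts buckets would still need re-binning onto these boundaries:

```scala
// Sketch of the OTLP exponential-histogram index function: at a given scale,
// base = 2^(2^-scale), and a value v > 0 lands in bucket ceil(log_base(v)) - 1,
// i.e. bucket i covers the range (base^i, base^(i+1)].
def expoBucketIndex(value: Double, scale: Int): Long = {
  val scaleFactor = math.pow(2.0, scale) / math.log(2.0) // 2^scale / ln(2)
  math.ceil(math.log(value) * scaleFactor).toLong - 1
}

// At scale 0 the base is 2: expoBucketIndex(3.0, 0) == 1 (range (2, 4]),
// expoBucketIndex(5.0, 0) == 2 (range (4, 8]).
```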
diff --git a/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/OpenTelemetryMetricsReporter.scala b/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/OpenTelemetryMetricsReporter.scala
index eef6da7bd..a9eec1c23 100644
--- a/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/OpenTelemetryMetricsReporter.scala
+++ b/reporters/kamon-opentelemetry/src/main/scala/kamon/otel/OpenTelemetryMetricsReporter.scala
@@ -34,14 +34,54 @@ import scala.collection.JavaConverters._
 import scala.concurrent.ExecutionContext
 import scala.util.{Failure, Success}
 
-case class BucketConfig(
-  defaultBuckets: Seq[JDouble],
-  timeBuckets: Seq[JDouble],
-  informationBuckets: Seq[JDouble],
-  percentageBuckets: Seq[JDouble],
-  customBuckets: Map[String, Seq[JDouble]]
+case class BucketConfig[T](
+  defaultBuckets: T,
+  timeBuckets: T,
+  informationBuckets: T,
+  percentageBuckets: T,
+  customBuckets: Map[String, T]
 )
 
+object Buckets {
+
+  private def readCustomBuckets(customBuckets: Config): Map[String, Seq[java.lang.Double]] =
+    customBuckets
+      .topLevelKeys
+      .map(k => (k, customBuckets.getDoubleList(ConfigUtil.quoteString(k)).asScala.toSeq))
+      .toMap
+
+  private def readCustomBucketsExpo(customBuckets: Config): Map[String, Int] =
+    customBuckets
+      .topLevelKeys
+      .map(k => (k, customBuckets.getInt(ConfigUtil.quoteString(k))))
+      .toMap
+
+  def parseBucketConfig(newConfig: Config): BucketConfig[Seq[JDouble]] = BucketConfig[Seq[JDouble]](
+    newConfig.getDoubleList("default-buckets").asScala.toSeq,
+    newConfig.getDoubleList("time-buckets").asScala.toSeq,
+    informationBuckets = newConfig.getDoubleList("information-buckets").asScala.toSeq,
+    percentageBuckets = newConfig.getDoubleList("percentage-buckets").asScala.toSeq,
+    readCustomBuckets(newConfig.getConfig("custom")))
+
+  def resolveBucketConfiguration[T](bucketConfig: BucketConfig[T])(metricName: String, unit: MeasurementUnit): T =
+    bucketConfig.customBuckets.getOrElse(
+      metricName,
+      unit.dimension match {
+        case Dimension.Time => bucketConfig.timeBuckets
+        case Dimension.Information => bucketConfig.informationBuckets
+        case Dimension.Percentage => bucketConfig.percentageBuckets
+        case _ => bucketConfig.defaultBuckets
+      }
+    )
+
+  def parseExpoBucketConfig(newConfig: Config): BucketConfig[Int] = BucketConfig[Int](
+    newConfig.getInt("default-bucket-count"),
+    newConfig.getInt("time-bucket-count"),
+    informationBuckets = newConfig.getInt("information-bucket-count"),
+    percentageBuckets = newConfig.getInt("percentage-bucket-count"),
+    readCustomBucketsExpo(newConfig.getConfig("custom")))
+
+}
 object OpenTelemetryMetricsReporter {
   private val logger = LoggerFactory.getLogger(classOf[OpenTelemetryMetricsReporter])
   private val kamonSettings: Status.Settings = Kamon.status().settings()
@@ -93,32 +133,14 @@ class OpenTelemetryMetricsReporter(metricsServiceFactory: OpenTelemetryConfigura
       Explicit
     }
 
-    def readCustomBuckets(customBuckets: Config): Map[String, Seq[java.lang.Double]] =
-      customBuckets
-        .topLevelKeys
-        .map(k => (k, customBuckets.getDoubleList(ConfigUtil.quoteString(k)).asScala.toSeq))
-        .toMap
-
-    val bucketConfig = BucketConfig(
-      newConfig.getDoubleList("kamon.otel.explicit-histo-boundaries.default-boundaries").asScala.toSeq,
-      newConfig.getDoubleList("kamon.otel.explicit-histo-boundaries.time-boundaries").asScala.toSeq,
-      informationBuckets = newConfig.getDoubleList("kamon.otel.explicit-histo-boundaries.information-boundaries").asScala.toSeq,
-      percentageBuckets = newConfig.getDoubleList("kamon.otel.explicit-histo-boundaries.percentage-boundaries").asScala.toSeq,
-      readCustomBuckets(newConfig.getConfig("kamon.otel.explicit-histo-boundaries.custom")))
-
-
-    def resolveBucketConfiguration(metricName: String, unit: MeasurementUnit): Seq[JDouble] =
-      bucketConfig.customBuckets.getOrElse(
-        metricName,
-        unit.dimension match {
-          case Dimension.Time => bucketConfig.timeBuckets
-          case Dimension.Information => bucketConfig.informationBuckets
-          case Dimension.Percentage => bucketConfig.percentageBuckets
-          case _ => bucketConfig.defaultBuckets
-        }
-      )
-
-    this.metricsConverterFunc = MetricsConverter.convert(resource, kamonSettings.version, histogramFormat, resolveBucketConfiguration)
+    val explicitBucketConfig = Buckets.parseBucketConfig(newConfig.getConfig("kamon.otel.explicit-histo-boundaries"))
+    val exponentialBucketConfig = Buckets.parseExpoBucketConfig(newConfig.getConfig("kamon.otel.exponential-histo-boundaries"))
+
+    val resolveExplicitBucketConfiguration = Buckets.resolveBucketConfiguration(explicitBucketConfig) _
+
+    val resolveExponentialBucketConfiguration = Buckets.resolveBucketConfiguration(exponentialBucketConfig) _
+
+    this.metricsConverterFunc = MetricsConverter.convert(resource, kamonSettings.version, histogramFormat, resolveExplicitBucketConfiguration, resolveExponentialBucketConfiguration)
 
     this.metricsService = Option(metricsServiceFactory.apply(config))
   }
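Since `resolveBucketConfiguration` is now generic in the bucket type, the explicit (`Seq[JDouble]`) and exponential (`Int`) configs share one lookup order: per-metric `custom` entry first, then the unit's dimension, then the default. A hypothetical use of the exponential variant (only `akka.actor.processing-time` appears in the config examples above; the other metric names are made up):

```scala
import kamon.metric.MeasurementUnit

// Resolution order demonstrated with hand-built data:
// custom metric name -> unit dimension -> default.
val expoConfig = BucketConfig[Int](
  defaultBuckets = 160,
  timeBuckets = 160,
  informationBuckets = 160,
  percentageBuckets = 100,
  customBuckets = Map("akka.actor.processing-time" -> 3))

val resolve = Buckets.resolveBucketConfiguration(expoConfig) _

resolve("akka.actor.processing-time", MeasurementUnit.time.nanoseconds) // 3   (custom entry wins)
resolve("jvm.heap.used", MeasurementUnit.information.bytes)             // 160 (information dimension)
resolve("some.other.metric", MeasurementUnit.none)                      // 160 (default)
```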
diff --git a/reporters/kamon-opentelemetry/src/test/resources/application.conf b/reporters/kamon-opentelemetry/src/test/resources/application.conf
index fcdf17669..612f753e1 100644
--- a/reporters/kamon-opentelemetry/src/test/resources/application.conf
+++ b/reporters/kamon-opentelemetry/src/test/resources/application.conf
@@ -16,7 +16,7 @@ kamon {
   otel {
     attributes = "att1=v1, att2 = v2 ,att3=+a%3Db%2Cc%3Dd+"
     explicit-histo-boundaries {
-      default-boundaries = [
+      default-buckets = [
         1,
         2,
         3,