11 changes: 8 additions & 3 deletions .github/workflows/benchmark.yml
@@ -42,10 +42,15 @@ jobs:
env:
DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}

+# TODO (jack-berg): Select or build appropriate benchmarks for other key areas:
+# - Log SDK record & export
+# - Trace SDK record & export
+# - Metric SDK export
+# - Noop implementation
- name: Run Benchmark
run: |
-cd sdk/trace/build
-java -jar libs/opentelemetry-sdk-trace-*-jmh.jar -rf json SpanBenchmark SpanPipelineBenchmark ExporterBenchmark
+cd sdk/all/build
+java -jar libs/opentelemetry-sdk-*-jmh.jar -rf json MetricRecordBenchmark

- name: Use CLA approved github bot
run: .github/scripts/use-cla-approved-bot.sh
@@ -54,7 +59,7 @@ jobs:
uses: benchmark-action/github-action-benchmark@4bdcce38c94cec68da58d012ac24b7b1155efe8b # v1.20.7
with:
tool: 'jmh'
-output-file-path: sdk/trace/build/jmh-result.json
+output-file-path: sdk/all/build/jmh-result.json

Member Author:
@tylerbenson this benchmark-action only allows you to have a single output file path. This means we need all the published benchmarks to be in a single module, so that we can run them with a single java -jar *-jmh.jar ... command. I think the opentelemetry-sdk artifact is a good spot for this.

This turns out to be a useful constraint as I think it will be nice to have all the public benchmarks colocated.

Member:

LGTM!

gh-pages-branch: benchmarks
github-token: ${{ secrets.GITHUB_TOKEN }}
benchmark-data-dir-path: "benchmarks"
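The TODO above, together with the note that all published benchmarks need to live in a single module, implies that future benchmarks for the other areas would sit next to MetricRecordBenchmark under sdk/all/src/jmh/java. Below is a minimal sketch of what a colocated trace-record benchmark could look like; the class name, measurement parameters, and overall shape are illustrative assumptions, not part of this change.

/*
 * Copyright The OpenTelemetry Authors
 * SPDX-License-Identifier: Apache-2.0
 */

package io.opentelemetry.sdk;

import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.sdk.trace.SdkTracerProvider;
import io.opentelemetry.sdk.trace.samplers.Sampler;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;

/** Hypothetical sketch of a colocated trace SDK record benchmark; not part of this PR. */
public class SpanRecordBenchmark {

  @State(Scope.Benchmark)
  public static class ThreadState {

    SdkTracerProvider tracerProvider;
    Tracer tracer;

    @Setup
    public void setup() {
      // Sample everything but register no processor, so only the record path is measured.
      tracerProvider = SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build();
      tracer = tracerProvider.get("benchmark");
    }

    @TearDown
    public void tearDown() {
      tracerProvider.shutdown();
    }
  }

  @Benchmark
  @Fork(1)
  @Warmup(iterations = 5, time = 1)
  @Measurement(iterations = 5, time = 1)
  public void recordSpan(ThreadState threadState) {
    Span span = threadState.tracer.spanBuilder("benchmark").startSpan();
    span.end();
  }
}

Because the benchmark-action step above accepts only one jmh-result.json, a class like this would just be added to the same sdk/all jmh source set and its name appended to the java -jar include list in the workflow; no other workflow changes would be needed.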
2 changes: 2 additions & 0 deletions sdk/all/build.gradle.kts
@@ -22,4 +22,6 @@ dependencies {
testAnnotationProcessor("com.google.auto.value:auto-value")

testImplementation(project(":sdk:testing"))

+jmh(project(":sdk:testing"))
}
246 changes: 246 additions & 0 deletions sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
@@ -0,0 +1,246 @@
/*
* Copyright The OpenTelemetry Authors
* SPDX-License-Identifier: Apache-2.0
*/

package io.opentelemetry.sdk;

import static io.opentelemetry.sdk.metrics.InstrumentType.COUNTER;
import static io.opentelemetry.sdk.metrics.InstrumentType.GAUGE;
import static io.opentelemetry.sdk.metrics.InstrumentType.HISTOGRAM;
import static io.opentelemetry.sdk.metrics.InstrumentType.UP_DOWN_COUNTER;

import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.Meter;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.sdk.common.export.MemoryMode;
import io.opentelemetry.sdk.metrics.Aggregation;
import io.opentelemetry.sdk.metrics.ExemplarFilter;
import io.opentelemetry.sdk.metrics.InstrumentType;
import io.opentelemetry.sdk.metrics.InstrumentValueType;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
import io.opentelemetry.sdk.metrics.export.DefaultAggregationSelector;
import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import io.opentelemetry.sdk.trace.SdkTracerProvider;
import io.opentelemetry.sdk.trace.samplers.Sampler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Group;
import org.openjdk.jmh.annotations.GroupThreads;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Warmup;

/**
* Notes on interpreting the data:
*
* <p>The benchmark has two dimensions which partially overlap: cardinality and thread count.
* Cardinality dictates how many unique attribute sets (i.e. series) are recorded to, and thread
* count dictates how many threads are simultaneously recording to those series. In all cases, the
* record path needs to look up an aggregation handle for the series corresponding to the
* measurement's {@link Attributes} in a {@link java.util.concurrent.ConcurrentHashMap}. That will
* be the case until otel adds support for <a
* href="https://github.com/open-telemetry/opentelemetry-specification/issues/4126">bound
* instruments</a>. The cardinality dictates the size of this map, which has some impact on
* performance. However, by far the dominant bottleneck is contention. That is, the number of
* threads simultaneously trying to record to the same series. Increasing the threads increases
* contention. Increasing cardinality decreases contention, as the threads are now spreading their
* record activities over more distinct series. The highest contention scenario is cardinality=1,
* threads=4. Any scenario with threads=1 has zero contention.
*
* <p>It's useful to characterize the performance of the metrics system under contention, as some
* high-performance applications may have many threads trying to record to the same series. It's
* also useful to characterize the performance of the metrics system under low contention, as some
* high-performance applications may not frequently be trying to concurrently record to the same
* series yet still care about the overhead of each record operation.
*
* <p>{@link AggregationTemporality} can impact performance because additional concurrency controls
* are needed to ensure there are no duplicate, partial, or lost writes while resetting the set of
* timeseries each collection.
*/
public class MetricRecordBenchmark {

private static final int recordCount = 10 * 1024;

@State(Scope.Benchmark)
public static class ThreadState {

@Param InstrumentTypeAndAggregation instrumentTypeAndAggregation;

@Param AggregationTemporality aggregationTemporality;

@Param({"1", "100"})
int cardinality;

// The following parameters are excluded from the benchmark to reduce combinatorial explosion
// but can optionally be enabled for ad hoc evaluation.

// InstrumentValueType doesn't materially impact performance. Uncomment to evaluate.
// @Param
// InstrumentValueType instrumentValueType;
InstrumentValueType instrumentValueType = InstrumentValueType.LONG;

// MemoryMode almost exclusively impacts collect from a performance standpoint. Uncomment to
// evaluate.
// @Param
// MemoryMode memoryMode;
MemoryMode memoryMode = MemoryMode.REUSABLE_DATA;

// Exemplars can impact performance, but we skip evaluation to limit test cases. Uncomment to
// evaluate.
// @Param({"true", "false"})
// boolean exemplars;
boolean exemplars = false;

OpenTelemetrySdk openTelemetry;
Instrument instrument;
List<Long> measurements;
List<Attributes> attributesList;
Span span;
io.opentelemetry.context.Scope contextScope;

@Setup
@SuppressWarnings("MustBeClosedChecker")
public void setup() {
InstrumentType instrumentType = instrumentTypeAndAggregation.instrumentType;
Aggregation aggregation = instrumentTypeAndAggregation.aggregation;

openTelemetry =
OpenTelemetrySdk.builder()
.setTracerProvider(SdkTracerProvider.builder().setSampler(Sampler.alwaysOn()).build())
.setMeterProvider(
SdkMeterProvider.builder()
.registerMetricReader(
InMemoryMetricReader.builder()
.setAggregationTemporalitySelector(unused -> aggregationTemporality)
.setDefaultAggregationSelector(
DefaultAggregationSelector.getDefault()
.with(instrumentType, aggregation))
.setMemoryMode(memoryMode)
.build())
.setExemplarFilter(
exemplars ? ExemplarFilter.traceBased() : ExemplarFilter.alwaysOff())
.build())
.build();

Meter meter = openTelemetry.getMeter("benchmark");
instrument = getInstrument(meter, instrumentType, instrumentValueType);
Tracer tracer = openTelemetry.getTracer("benchmark");
span = tracer.spanBuilder("benchmark").startSpan();
// We suppress warnings on closing here; the scope is closed in tearDown().
contextScope = span.makeCurrent();

Random random = new Random();
attributesList = new ArrayList<>(cardinality);
AttributeKey<String> key = AttributeKey.stringKey("key");
String last = "aaaaaaaaaaaaaaaaaaaaaaaaaa";
for (int i = 0; i < cardinality; i++) {
char[] chars = last.toCharArray();
chars[random.nextInt(last.length())] = (char) (random.nextInt(26) + 'a');
last = new String(chars);
attributesList.add(Attributes.of(key, last));
}
Collections.shuffle(attributesList);

measurements = new ArrayList<>(recordCount);
for (int i = 0; i < recordCount; i++) {
measurements.add((long) random.nextInt(2000));
}
Collections.shuffle(measurements);
}

@TearDown
public void tearDown() {
contextScope.close();
span.end();
openTelemetry.shutdown();
}
}

@Benchmark
@Group("threads1")
@GroupThreads(1)
@Fork(1)
@Warmup(iterations = 5, time = 1)
@Measurement(iterations = 5, time = 1)
public void record_1Thread(ThreadState threadState) {
record(threadState);
}

@Benchmark
@Group("threads4")
@GroupThreads(4)
@Fork(1)
@Warmup(iterations = 5, time = 1)
@Measurement(iterations = 5, time = 1)
public void record_4Threads(ThreadState threadState) {
record(threadState);
}

private static void record(ThreadState threadState) {
for (int i = 0; i < recordCount; i++) {
Attributes attributes = threadState.attributesList.get(i % threadState.attributesList.size());
long value = threadState.measurements.get(i % threadState.measurements.size());
threadState.instrument.record(value, attributes);
}
}

@SuppressWarnings("ImmutableEnumChecker")
public enum InstrumentTypeAndAggregation {
COUNTER_SUM(COUNTER, Aggregation.sum()),
UP_DOWN_COUNTER_SUM(UP_DOWN_COUNTER, Aggregation.sum()),
GAUGE_LAST_VALUE(GAUGE, Aggregation.lastValue()),
HISTOGRAM_EXPLICIT(HISTOGRAM, Aggregation.explicitBucketHistogram()),
HISTOGRAM_BASE2_EXPONENTIAL(HISTOGRAM, Aggregation.base2ExponentialBucketHistogram());

InstrumentTypeAndAggregation(InstrumentType instrumentType, Aggregation aggregation) {
this.instrumentType = instrumentType;
this.aggregation = aggregation;
}

private final InstrumentType instrumentType;
private final Aggregation aggregation;
}

private interface Instrument {
void record(long value, Attributes attributes);
}

private static Instrument getInstrument(
Meter meter, InstrumentType instrumentType, InstrumentValueType instrumentValueType) {
String name = "instrument";
switch (instrumentType) {
case COUNTER:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter.counterBuilder(name).ofDoubles().build()::add
: meter.counterBuilder(name).build()::add;
case UP_DOWN_COUNTER:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter.upDownCounterBuilder(name).ofDoubles().build()::add
: meter.upDownCounterBuilder(name).build()::add;
case HISTOGRAM:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter.histogramBuilder(name).build()::record
: meter.histogramBuilder(name).ofLongs().build()::record;
case GAUGE:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter.gaugeBuilder(name).build()::set
: meter.gaugeBuilder(name).ofLongs().build()::set;
case OBSERVABLE_COUNTER:
case OBSERVABLE_UP_DOWN_COUNTER:
case OBSERVABLE_GAUGE:
}
throw new IllegalArgumentException();
}
}
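The class Javadoc above attributes most of the record-path cost to contention on per-series aggregation handles looked up in a ConcurrentHashMap. A stripped-down sketch of that pattern (the names and types here are illustrative, not the SDK's actual storage classes) shows why cardinality=1 with threads=4 is the worst case: every thread funnels its updates through the same handle.

import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

/** Illustrative model of the per-series handle lookup, not the SDK implementation. */
class SeriesStorageSketch {

  // One handle per unique attribute set ("series"); the map size is the cardinality.
  private final ConcurrentHashMap<Attributes, LongAdder> handles = new ConcurrentHashMap<>();

  void record(long value, Attributes attributes) {
    // Every measurement resolves its Attributes to a handle before updating it. The lookup cost
    // grows slowly with cardinality; the update is where concurrent threads contend.
    handles.computeIfAbsent(attributes, unused -> new LongAdder()).add(value);
  }

  public static void main(String[] args) {
    SeriesStorageSketch storage = new SeriesStorageSketch();
    Attributes series = Attributes.of(AttributeKey.stringKey("key"), "value");
    // cardinality=1: all threads would hit this one handle, the highest-contention scenario.
    storage.record(42, series);
  }
}

With cardinality=100 the same volume of updates spreads over 100 handles, which is why raising cardinality lowers contention in the benchmark results.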

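The Javadoc also notes that AggregationTemporality matters because delta collection resets the recorded timeseries. A naive swap-on-collect, sketched below with assumed names (again not SDK internals), shows where the duplicate, partial, or lost-write hazard comes from and why the real implementation needs extra coordination between record and collect.

import io.opentelemetry.api.common.Attributes;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAdder;

/** Illustrative model of delta collection, not the SDK implementation. */
class DeltaStorageSketch {

  private final AtomicReference<ConcurrentHashMap<Attributes, LongAdder>> active =
      new AtomicReference<>(new ConcurrentHashMap<>());

  void record(long value, Attributes attributes) {
    // Reads the current map and updates a handle; this races with collect() swapping the map.
    active.get().computeIfAbsent(attributes, unused -> new LongAdder()).add(value);
  }

  Map<Attributes, Long> collect() {
    // Swap in a fresh map so the next interval starts from zero...
    ConcurrentHashMap<Attributes, LongAdder> finished = active.getAndSet(new ConcurrentHashMap<>());
    // ...but a writer that fetched the old map just before the swap may still be adding to it,
    // so a naive sum here can miss that write. This is the hazard the extra concurrency controls
    // in the SDK exist to prevent; cumulative temporality avoids the swap entirely.
    Map<Attributes, Long> result = new HashMap<>();
    finished.forEach((attributes, adder) -> result.put(attributes, adder.sum()));
    return result;
  }
}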
This file was deleted.
