
chore(NODE-6870): add tags to normalized_throughput #4478


Merged · 6 commits · Mar 20, 2025
12 changes: 12 additions & 0 deletions .evergreen/config.in.yml
@@ -109,6 +109,18 @@ functions:
          - .evergreen/run-tests.sh

  "perf send":
    - command: s3.put
      params:
        aws_key: ${aws_key}
        aws_secret: ${aws_secret}
        local_file: src/test/benchmarks/driver_bench/results.json
        optional: true
        # TODO NODE-4707 - change upload directory to ${UPLOAD_BUCKET}
        remote_file: mongo-node-driver/${revision}/${version_id}/results.${task_name}.json
        bucket: mciuploads
        permissions: public-read
        content_type: application/json
        display_name: "Performance Results"
    - command: subprocess.exec
      params:
        working_dir: src
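Because the object is uploaded with permissions: public-read to the mciuploads bucket, the raw results for a task should end up publicly downloadable at a URL of roughly this shape (assuming the standard S3 virtual-hosted URL format; the exact host and path prefix may differ in practice):

https://mciuploads.s3.amazonaws.com/mongo-node-driver/<revision>/<version_id>/results.<task_name>.json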
11 changes: 11 additions & 0 deletions .evergreen/config.yml
@@ -81,6 +81,17 @@ functions:
        args:
          - .evergreen/run-tests.sh
  perf send:
    - command: s3.put
      params:
        aws_key: ${aws_key}
        aws_secret: ${aws_secret}
        local_file: src/test/benchmarks/driver_bench/results.json
        optional: true
        remote_file: mongo-node-driver/${revision}/${version_id}/results.${task_name}.json
        bucket: mciuploads
        permissions: public-read
        content_type: application/json
        display_name: Performance Results
    - command: subprocess.exec
      params:
        working_dir: src
4 changes: 3 additions & 1 deletion .evergreen/perf-send.sh
@@ -1,9 +1,11 @@
#!/usr/bin/env bash

set -euox pipefail
set -euo pipefail

source $DRIVERS_TOOLS/.evergreen/init-node-and-npm-env.sh

TARGET_FILE=$(realpath "${TARGET_FILE:-./test/benchmarks/driver_bench/results.json}")

set -o xtrace

node ./.evergreen/perf_send.mjs $TARGET_FILE
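The script now resolves TARGET_FILE before turning on xtrace and then hands the resolved path to perf_send.mjs. That script is not part of this diff, so the following is only a minimal sketch of the kind of consumer that could sit on the other end; every name and check in it is an assumption, not the driver's actual implementation:

// Hypothetical sketch only; the real .evergreen/perf_send.mjs is not shown in this PR.
import { readFile } from 'node:fs/promises';
import assert from 'node:assert/strict';

const targetFile = process.argv[2]; // perf-send.sh passes the resolved TARGET_FILE here
assert.ok(targetFile, 'usage: node perf_send.mjs <results.json>');

const results = JSON.parse(await readFile(targetFile, 'utf8'));
assert.ok(Array.isArray(results), 'results.json is expected to be an array of MetricInfo entries');

for (const { info, metrics } of results) {
  assert.ok(info?.test_name, 'every entry needs info.test_name');
  assert.ok(Array.isArray(metrics), 'every entry needs a metrics array');
}

// The real script would forward these results to the perf reporting endpoint.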
47 changes: 28 additions & 19 deletions test/benchmarks/driver_bench/src/main.mts
@@ -110,6 +110,13 @@ for (const [suite, benchmarks] of Object.entries(tests)) {
  console.groupEnd();
}

const metricInfoFilterByName =
  (testName: string) =>
  ({ info: { test_name } }: MetricInfo) =>
    test_name === testName;

const isMBsMetric = ({ name }: Metric) => name === 'megabytes_per_second';

function calculateCompositeBenchmarks(results: MetricInfo[]) {
  const composites = {
    singleBench: ['findOne', 'smallDocInsertOne', 'largeDocInsertOne'],
@@ -144,13 +151,6 @@ function calculateCompositeBenchmarks(results: MetricInfo[]) {
    ]
  };

  const aMetricInfo =
    (testName: string) =>
    ({ info: { test_name } }: MetricInfo) =>
      test_name === testName;

  const anMBsMetric = ({ name }: Metric) => name === 'megabytes_per_second';

  let readBenchResult;
  let writeBenchResult;

@@ -162,10 +162,10 @@

    let sum = 0;
    for (const testName of compositeTests) {
      const testScore = results.find(aMetricInfo(testName));
      const testScore = results.find(metricInfoFilterByName(testName));
      assert.ok(testScore, `${compositeName} suite requires ${testName} for composite score`);

      const metric = testScore.metrics.find(anMBsMetric);
      const metric = testScore.metrics.find(isMBsMetric);
      assert.ok(metric, `${testName} is missing a megabytes_per_second metric`);

      sum += metric.value;
@@ -199,31 +199,40 @@ function calculateCompositeBenchmarks(results: MetricInfo[]) {
}

function calculateNormalizedResults(results: MetricInfo[]): MetricInfo[] {
  const baselineBench = results.find(r => r.info.test_name === 'cpuBaseline');
  const pingBench = results.find(r => r.info.test_name === 'ping');
  const baselineBench = results.find(metricInfoFilterByName('cpuBaseline'));
  const pingBench = results.find(metricInfoFilterByName('ping'));

  assert.ok(pingBench, 'ping bench results not found!');
  assert.ok(baselineBench, 'baseline results not found!');
  const pingThroughput = pingBench.metrics[0].value;
  const cpuBaseline = baselineBench.metrics[0].value;
  assert.ok(baselineBench, 'cpuBaseline results not found!');

  const cpuBaseline = baselineBench.metrics.find(isMBsMetric);
  const pingThroughput = pingBench.metrics.find(isMBsMetric);

  assert.ok(cpuBaseline, 'cpu benchmark does not have a MB/s metric');
  assert.ok(pingThroughput, 'ping does not have a MB/s metric');

  for (const bench of results) {
    if (bench.info.test_name === 'cpuBaseline') continue;

    const currentMetric = bench.metrics.find(isMBsMetric);
    assert.ok(currentMetric, `${bench.info.test_name} does not have a MB/s metric`);

    if (bench.info.test_name === 'ping') {
      bench.metrics.push({
        name: 'normalized_throughput',
        value: bench.metrics[0].value / cpuBaseline,
        value: currentMetric.value / cpuBaseline.value,
        metadata: {
          tags: currentMetric.metadata.tags,
          improvement_direction: 'up'
        }
      });
    }
    // Compute normalized_throughput of benchmarks against ping bench
    else {
    } else {
      // Compute normalized_throughput of benchmarks against ping bench
      bench.metrics.push({
        name: 'normalized_throughput',
        value: bench.metrics[0].value / pingThroughput,
        value: currentMetric.value / pingThroughput.value,
        metadata: {
          tags: currentMetric.metadata.tags,
          improvement_direction: 'up'
        }
      });
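To make the intent of the normalized_throughput change concrete, here is a small worked example with made-up numbers. The Metric and MetricInfo shapes are inferred from this diff (the actual type definitions live elsewhere in driver_bench and may carry more fields), so treat them as assumptions:

import assert from 'node:assert/strict';

// Shapes inferred from the diff; the real definitions may differ.
type Metric = {
  name: string;
  value: number;
  metadata: { tags?: string[]; improvement_direction: 'up' | 'down' };
};
type MetricInfo = { info: { test_name: string }; metrics: Metric[] };

const isMBsMetric = ({ name }: Metric) => name === 'megabytes_per_second';

// Made-up numbers: ping measures 40 MB/s, findOne measures 10 MB/s.
const ping: MetricInfo = {
  info: { test_name: 'ping' },
  metrics: [{ name: 'megabytes_per_second', value: 40, metadata: { tags: ['read'], improvement_direction: 'up' } }]
};
const findOne: MetricInfo = {
  info: { test_name: 'findOne' },
  metrics: [{ name: 'megabytes_per_second', value: 10, metadata: { tags: ['read'], improvement_direction: 'up' } }]
};

const pingThroughput = ping.metrics.find(isMBsMetric);
const currentMetric = findOne.metrics.find(isMBsMetric);
assert.ok(pingThroughput && currentMetric);

// As in the patch: the derived metric copies the tags of the MB/s metric it came from,
// so downstream dashboards can group normalized and raw throughput the same way.
findOne.metrics.push({
  name: 'normalized_throughput',
  value: currentMetric.value / pingThroughput.value, // 10 / 40 = 0.25
  metadata: { tags: currentMetric.metadata.tags, improvement_direction: 'up' }
});

The ping benchmark itself is normalized against cpuBaseline rather than against ping, but the tag propagation is identical in both branches.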