Commit 99b60d5

fix: update sum metric query based on v1 integration test (#650)

Fix the sum query to produce the correct results for the min/max test case from v1. Ref: HDX-1421

1 parent 7f1a5a0 · commit 99b60d5

6 files changed: +251, -97 lines changed

.changeset/silent-dolphins-call.md
Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
+---
+"@hyperdx/common-utils": patch
+"@hyperdx/api": patch
+---
+
+Fixed sum metric query to pass integration test case from v1.

packages/api/src/clickhouse/__tests__/__snapshots__/renderChartConfig.test.ts.snap
Lines changed: 32 additions & 6 deletions

@@ -21,15 +21,41 @@ Array [
 ]
 `;
 
+exports[`renderChartConfig Query Metrics calculates min_rate/max_rate correctly for sum metrics: maxSum 1`] = `
+Array [
+Object {
+"__hdx_time_bucket": "2022-01-05T00:00:00Z",
+"max(toFloat64OrNull(toString(Value)))": 24,
+},
+Object {
+"__hdx_time_bucket": "2022-01-05T00:10:00Z",
+"max(toFloat64OrNull(toString(Value)))": 134,
+},
+]
+`;
+
+exports[`renderChartConfig Query Metrics calculates min_rate/max_rate correctly for sum metrics: minSum 1`] = `
+Array [
+Object {
+"__hdx_time_bucket": "2022-01-05T00:00:00Z",
+"min(toFloat64OrNull(toString(Value)))": 15,
+},
+Object {
+"__hdx_time_bucket": "2022-01-05T00:10:00Z",
+"min(toFloat64OrNull(toString(Value)))": 52,
+},
+]
+`;
+
 exports[`renderChartConfig Query Metrics handles counter resets correctly for sum metrics 1`] = `
 Array [
 Object {
 "__hdx_time_bucket": "2022-01-05T00:00:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 15,
+"sum(toFloat64OrNull(toString(Value)))": 15,
 },
 Object {
 "__hdx_time_bucket": "2022-01-05T00:10:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 52,
+"sum(toFloat64OrNull(toString(Value)))": 52,
 },
 ]
 `;
@@ -115,19 +141,19 @@ exports[`renderChartConfig Query Metrics single sum rate 1`] = `
 Array [
 Object {
 "__hdx_time_bucket": "2022-01-05T00:00:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 19,
+"sum(toFloat64OrNull(toString(Value)))": 19,
 },
 Object {
 "__hdx_time_bucket": "2022-01-05T00:05:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 79,
+"sum(toFloat64OrNull(toString(Value)))": 79,
 },
 Object {
 "__hdx_time_bucket": "2022-01-05T00:10:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 5813,
+"sum(toFloat64OrNull(toString(Value)))": 5813,
 },
 Object {
 "__hdx_time_bucket": "2022-01-05T00:15:00Z",
-"sum(toFloat64OrNull(toString(Rate)))": 78754,
+"sum(toFloat64OrNull(toString(Value)))": 78754,
 },
 ]
 `;

packages/api/src/clickhouse/__tests__/renderChartConfig.test.ts
Lines changed: 19 additions & 6 deletions

@@ -532,12 +532,23 @@ describe('renderChartConfig', () => {
 expect(await queryData(query)).toMatchSnapshot();
 });
 
-// FIXME: here are the expected values
-// [0, 1, 8, 8, 15, 15, 23, 25, 25, 67]
-// [0, 2, 9, 9, 24, 34, 44, 66, 66, 158]
-// min -> [15, 52]
-// max -> [24, 134]
-it.skip('calculates min_rate/max_rate correctly for sum metrics', async () => {
+it('calculates min_rate/max_rate correctly for sum metrics', async () => {
+// Based on the data inserted in the fixture, the expected stream of values
+// for each series after adjusting for the zero reset should be:
+// MIN_VARIANT_0: [0, 1, 8, 8, 15, 15, 23, 25, 25, 67]
+// MIN_VARIANT_1: [0, 2, 9, 9, 24, 34, 44, 66, 66, 158]
+//
+// At the 10 minute buckets, should result in three buckets for each where
+// the first bucket is outside the query window.
+// MIN_VARIANT_0: [0], [1, 8, 8, 15], [15, 23, 25, 25, 67]]
+// MIN_VARIANT_1: [0], [2, 9, 9, 24], [34, 44, 66, 66, 158]]
+//
+// When comparing the value at the end of the buckets over the filtered
+// time frame it should result in the following counts added per bucket as:
+// MIN_VARIANT_0: [15, 52]
+// MIN_VARIANT_1: [24, 134]
+//
+// These values are what we apply the aggregation functions to.
 const minQuery = await renderChartConfig(
 {
 select: [
@@ -562,6 +573,7 @@ describe('renderChartConfig', () => {
 },
 metadata,
 );
+expect(await queryData(minQuery)).toMatchSnapshot('minSum');
 
 const maxQuery = await renderChartConfig(
 {
@@ -587,6 +599,7 @@ describe('renderChartConfig', () => {
 },
 metadata,
 );
+expect(await queryData(maxQuery)).toMatchSnapshot('maxSum');
 });
 });
 });
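
The comment block in the re-enabled test spells out where the snapshot numbers come from. As a worked check, here is a minimal TypeScript sketch of that arithmetic (not the library's code; the helper names are hypothetical): take the last cumulative value in each 10-minute bucket, subtract the previous bucket's last value to get the amount added during that bucket, then take the per-bucket min/max across the two series.

// Cumulative series from the test comment, already adjusted for counter resets
// and split into 10-minute buckets; the first bucket falls outside the query window.
const MIN_VARIANT_0 = [[0], [1, 8, 8, 15], [15, 23, 25, 25, 67]];
const MIN_VARIANT_1 = [[0], [2, 9, 9, 24], [34, 44, 66, 66, 158]];

// Last cumulative value in each bucket, then the difference against the
// previous bucket's last value (the per-bucket "counts added").
const bucketDeltas = (buckets: number[][]): number[] => {
  const highs = buckets.map(b => b[b.length - 1]);
  return highs.slice(1).map((high, i) => high - highs[i]);
};

const deltas0 = bucketDeltas(MIN_VARIANT_0); // [15, 52]
const deltas1 = bucketDeltas(MIN_VARIANT_1); // [24, 134]

// Applying the aggregation per bucket across the two series reproduces the snapshots:
const minSum = deltas0.map((v, i) => Math.min(v, deltas1[i])); // [15, 52]
const maxSum = deltas0.map((v, i) => Math.max(v, deltas1[i])); // [24, 134]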

packages/common-utils/src/__tests__/__snapshots__/renderChartConfig.test.ts.snap
Lines changed: 75 additions & 0 deletions

@@ -27,3 +27,78 @@ exports[`renderChartConfig should generate sql for a single gauge metric 1`] = `
 TO toUnixTimestamp(toStartOfInterval(fromUnixTimestamp64Milli(1765670400000), INTERVAL 1 minute))
 STEP 60 LIMIT 10"
 `;
+
+exports[`renderChartConfig should generate sql for a single histogram metric 1`] = `
+"WITH HistRate AS (SELECT *, any(BucketCounts) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevBucketCounts,
+any(CountLength) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevCountLength,
+any(AttributesHash) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevAttributesHash,
+IF(AggregationTemporality = 1,
+BucketCounts,
+IF(AttributesHash = PrevAttributesHash AND CountLength = PrevCountLength,
+arrayMap((prev, curr) -> IF(curr < prev, curr, toUInt64(toInt64(curr) - toInt64(prev))), PrevBucketCounts, BucketCounts),
+BucketCounts)) as BucketRates
+FROM (
+SELECT *, cityHash64(mapConcat(ScopeAttributes, ResourceAttributes, Attributes)) AS AttributesHash,
+length(BucketCounts) as CountLength
+FROM default.otel_metrics_histogram)
+WHERE MetricName = 'http.server.duration'
+ORDER BY Attributes, TimeUnix ASC
+),RawHist AS (
+SELECT *, toUInt64( 0.5 * arraySum(BucketRates)) AS Rank,
+arrayCumSum(BucketRates) as CumRates,
+arrayFirstIndex(x -> if(x > Rank, 1, 0), CumRates) AS BucketLowIdx,
+IF(BucketLowIdx = length(BucketRates),
+ExplicitBounds[length(ExplicitBounds)], -- if the low bound is the last bucket, use the last bound value
+IF(BucketLowIdx > 1, -- indexes are 1-based
+ExplicitBounds[BucketLowIdx] + (ExplicitBounds[BucketLowIdx + 1] - ExplicitBounds[BucketLowIdx]) *
+intDivOrZero(
+Rank - CumRates[BucketLowIdx - 1],
+CumRates[BucketLowIdx] - CumRates[BucketLowIdx - 1]),
+arrayElement(ExplicitBounds, BucketLowIdx + 1) * intDivOrZero(Rank, CumRates[BucketLowIdx]))) as Rate
+FROM HistRate) SELECT sum(
+toFloat64OrNull(toString(Rate))
+) FROM RawHist WHERE (TimeUnix >= fromUnixTimestamp64Milli(1739318400000) AND TimeUnix <= fromUnixTimestamp64Milli(1765670400000)) LIMIT 10"
+`;
+
+exports[`renderChartConfig should generate sql for a single sum metric 1`] = `
+"WITH Source AS (
+SELECT
+*,
+cityHash64(mapConcat(ScopeAttributes, ResourceAttributes, Attributes)) AS AttributesHash,
+IF(AggregationTemporality = 1,
+SUM(Value) OVER (PARTITION BY AttributesHash ORDER BY AttributesHash, TimeUnix ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
+deltaSum(Value) OVER (PARTITION BY AttributesHash ORDER BY AttributesHash, TimeUnix ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
+) AS Value
+FROM default.otel_metrics_sum
+WHERE (TimeUnix >= toStartOfInterval(fromUnixTimestamp64Milli(1739318400000), INTERVAL 5 minute) - INTERVAL 5 minute AND TimeUnix <= toStartOfInterval(fromUnixTimestamp64Milli(1765670400000), INTERVAL 5 minute) + INTERVAL 5 minute) AND ((MetricName = 'db.client.connections.usage'))),Bucketed AS (
+SELECT
+toStartOfInterval(toDateTime(TimeUnix), INTERVAL 5 minute) AS \`__hdx_time_bucket2\`,
+AttributesHash,
+last_value(Source.Value) AS \`__hdx_value_high\`,
+any(\`__hdx_value_high\`) OVER(PARTITION BY AttributesHash ORDER BY \`__hdx_time_bucket2\` ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS \`__hdx_value_high_prev\`,
+\`__hdx_value_high\` - \`__hdx_value_high_prev\` AS Value,
+any(ResourceAttributes) AS ResourceAttributes,
+any(ResourceSchemaUrl) AS ResourceSchemaUrl,
+any(ScopeName) AS ScopeName,
+any(ScopeVersion) AS ScopeVersion,
+any(ScopeAttributes) AS ScopeAttributes,
+any(ScopeDroppedAttrCount) AS ScopeDroppedAttrCount,
+any(ScopeSchemaUrl) AS ScopeSchemaUrl,
+any(ServiceName) AS ServiceName,
+any(MetricName) AS MetricName,
+any(MetricDescription) AS MetricDescription,
+any(MetricUnit) AS MetricUnit,
+any(Attributes) AS Attributes,
+any(StartTimeUnix) AS StartTimeUnix,
+any(Flags) AS Flags,
+any(AggregationTemporality) AS AggregationTemporality,
+any(IsMonotonic) AS IsMonotonic
+FROM Source
+GROUP BY AttributesHash, \`__hdx_time_bucket2\`
+ORDER BY AttributesHash, \`__hdx_time_bucket2\`
+) SELECT avg(
+toFloat64OrNull(toString(Value))
+),toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` FROM Bucketed WHERE (\`__hdx_time_bucket2\` >= fromUnixTimestamp64Milli(1739318400000) AND \`__hdx_time_bucket2\` <= fromUnixTimestamp64Milli(1765670400000)) GROUP BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` ORDER BY toStartOfInterval(toDateTime(\`__hdx_time_bucket2\`), INTERVAL 5 minute) AS \`__hdx_time_bucket\` WITH FILL FROM toUnixTimestamp(toStartOfInterval(fromUnixTimestamp64Milli(1739318400000), INTERVAL 5 minute))
+TO toUnixTimestamp(toStartOfInterval(fromUnixTimestamp64Milli(1765670400000), INTERVAL 5 minute))
+STEP 300 LIMIT 10"
+`;
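
The new single-sum-metric snapshot above shows the reworked two-stage query: a Source CTE that normalizes each counter series into a running total per AttributesHash (a plain SUM window when AggregationTemporality = 1, otherwise a deltaSum window so that counter resets are not subtracted back out), and a Bucketed CTE that keeps the last normalized value per attribute hash in each time bucket and subtracts the previous bucket's value. Below is a rough TypeScript model of the reset handling only, assuming deltaSum-style semantics (only positive consecutive differences accumulate); the raw samples are hypothetical, chosen so the adjusted output matches the MIN_VARIANT_0 series quoted in the API test comment.

// Accumulate consecutive differences, ignoring negative ones, so a counter
// reset does not pull the running total back down.
const adjustForResets = (raw: number[]): number[] => {
  const out: number[] = [];
  let total = 0;
  for (let i = 0; i < raw.length; i++) {
    const diff = i === 0 ? 0 : raw[i] - raw[i - 1]; // first row contributes nothing
    if (diff > 0) total += diff; // drops (resets) are ignored, not subtracted
    out.push(total);
  }
  return out;
};

// Hypothetical raw stream with one reset to zero partway through:
console.log(adjustForResets([0, 1, 8, 8, 15, 0, 8, 10, 10, 52]));
// -> [0, 1, 8, 8, 15, 15, 23, 25, 25, 67]

The per-bucket subtraction that follows in the Bucketed CTE is the same arithmetic shown in the sketch after the API test diff above.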

packages/common-utils/src/__tests__/renderChartConfig.test.ts
Lines changed: 28 additions & 57 deletions

@@ -94,29 +94,33 @@ describe('renderChartConfig', () => {
 
 const generatedSql = await renderChartConfig(config, mockMetadata);
 const actual = parameterizedQueryToSql(generatedSql);
-expect(actual).toBe(
-'WITH RawSum AS (SELECT *,\n' +
-' any(Value) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevValue,\n' +
-' any(AttributesHash) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevAttributesHash,\n' +
-' IF(AggregationTemporality = 1,\n' +
-' Value,IF(Value - PrevValue < 0 AND AttributesHash = PrevAttributesHash, Value,\n' +
-' IF(AttributesHash != PrevAttributesHash, 0, Value - PrevValue))) as Rate\n' +
-' FROM (\n' +
-' SELECT *, \n' +
-' cityHash64(mapConcat(ScopeAttributes, ResourceAttributes, Attributes)) AS AttributesHash\n' +
-' FROM default.otel_metrics_sum\n' +
-" WHERE MetricName = 'db.client.connections.usage'\n" +
-' ORDER BY AttributesHash, TimeUnix ASC\n' +
-' ) ) SELECT avg(\n' +
-' toFloat64OrNull(toString(Rate))\n' +
-' ),toStartOfInterval(toDateTime(TimeUnix), INTERVAL 5 minute) AS `__hdx_time_bucket`' +
-' FROM RawSum WHERE (TimeUnix >= fromUnixTimestamp64Milli(1739318400000) AND TimeUnix <= fromUnixTimestamp64Milli(1765670400000))' +
-' GROUP BY toStartOfInterval(toDateTime(TimeUnix), INTERVAL 5 minute) AS `__hdx_time_bucket`' +
-' ORDER BY toStartOfInterval(toDateTime(TimeUnix), INTERVAL 5 minute) AS `__hdx_time_bucket`' +
-' WITH FILL FROM toUnixTimestamp(toStartOfInterval(fromUnixTimestamp64Milli(1739318400000), INTERVAL 5 minute))\n' +
-' TO toUnixTimestamp(toStartOfInterval(fromUnixTimestamp64Milli(1765670400000), INTERVAL 5 minute))\n' +
-' STEP 300' +
-' LIMIT 10',
+expect(actual).toMatchSnapshot();
+});
+
+it('should throw error for string select on sum metric', async () => {
+const config: ChartConfigWithOptDateRange = {
+displayType: DisplayType.Line,
+connection: 'test-connection',
+metricTables: {
+gauge: 'otel_metrics_gauge',
+histogram: 'otel_metrics_histogram',
+sum: 'otel_metrics_sum',
+},
+from: {
+databaseName: 'default',
+tableName: '',
+},
+select: 'Value',
+where: '',
+whereLanguage: 'sql',
+timestampValueExpression: 'TimeUnix',
+dateRange: [new Date('2025-02-12'), new Date('2025-12-14')],
+granularity: '5 minute',
+limit: { limit: 10 },
+};
+
+await expect(renderChartConfig(config, mockMetadata)).rejects.toThrow(
+'multi select or string select on metrics not supported',
 );
 });
 
@@ -152,39 +156,6 @@ describe('renderChartConfig', () => {
 
 const generatedSql = await renderChartConfig(config, mockMetadata);
 const actual = parameterizedQueryToSql(generatedSql);
-expect(actual).toBe(
-'WITH HistRate AS (SELECT *, any(BucketCounts) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevBucketCounts,\n' +
-' any(CountLength) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevCountLength,\n' +
-' any(AttributesHash) OVER (ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) AS PrevAttributesHash,\n' +
-' IF(AggregationTemporality = 1,\n' +
-' BucketCounts,\n' +
-' IF(AttributesHash = PrevAttributesHash AND CountLength = PrevCountLength,\n' +
-' arrayMap((prev, curr) -> IF(curr < prev, curr, toUInt64(toInt64(curr) - toInt64(prev))), PrevBucketCounts, BucketCounts),\n' +
-' BucketCounts)) as BucketRates\n' +
-' FROM (\n' +
-' SELECT *, cityHash64(mapConcat(ScopeAttributes, ResourceAttributes, Attributes)) AS AttributesHash,\n' +
-' length(BucketCounts) as CountLength\n' +
-' FROM default.otel_metrics_histogram)\n' +
-" WHERE MetricName = 'http.server.duration'\n " +
-' ORDER BY Attributes, TimeUnix ASC\n' +
-' ),RawHist AS (\n' +
-' SELECT *, toUInt64( 0.5 * arraySum(BucketRates)) AS Rank,\n' +
-' arrayCumSum(BucketRates) as CumRates,\n' +
-' arrayFirstIndex(x -> if(x > Rank, 1, 0), CumRates) AS BucketLowIdx,\n' +
-' IF(BucketLowIdx = length(BucketRates),\n' +
-' ExplicitBounds[length(ExplicitBounds)], -- if the low bound is the last bucket, use the last bound value\n' +
-' IF(BucketLowIdx > 1, -- indexes are 1-based\n' +
-' ExplicitBounds[BucketLowIdx] + (ExplicitBounds[BucketLowIdx + 1] - ExplicitBounds[BucketLowIdx]) *\n' +
-' intDivOrZero(\n' +
-' Rank - CumRates[BucketLowIdx - 1],\n' +
-' CumRates[BucketLowIdx] - CumRates[BucketLowIdx - 1]),\n' +
-' arrayElement(ExplicitBounds, BucketLowIdx + 1) * intDivOrZero(Rank, CumRates[BucketLowIdx]))) as Rate\n' +
-' FROM HistRate) SELECT sum(\n' +
-' toFloat64OrNull(toString(Rate))\n' +
-' )' +
-' FROM RawHist' +
-' WHERE (TimeUnix >= fromUnixTimestamp64Milli(1739318400000) AND TimeUnix <= fromUnixTimestamp64Milli(1765670400000))' +
-' LIMIT 10',
-);
+expect(actual).toMatchSnapshot();
 });
 });
