Commit 8a729a8

Merge branch 'potel-base' into ivana/potel/slim-down-attrs
2 parents fea9875 + 5b6d37c

File tree: 15 files changed, +58 -175 lines

MIGRATION_GUIDE.md (+2)

@@ -27,6 +27,7 @@ Looking to upgrade from Sentry SDK 2.x to 3.x? Here's a comprehensive list of wh
 - The `sampling_context` argument of `traces_sampler` and `profiles_sampler` now additionally contains all span attributes known at span start.
 - We updated how we handle `ExceptionGroup`s. You will now get more data if ExceptionGroups are appearing in chained exceptions. It could happen that after updating the SDK the grouping of issues change because of this. So eventually you will see the same exception in two Sentry issues (one from before the update, one from after the update)
 - The integration for Python `logging` module does not send Sentry issues by default anymore when calling `logging.error()`, `logging.critical()` or `logging.exception()`. If you want to preserve the old behavior use `sentry_sdk.init(integrations=[LoggingIntegration(event_level="ERROR")])`.
+- The `SentrySpanProcessor` and `SentryPropagator` are exported from `sentry_sdk.opentelemetry` instead of `sentry_sdk.integrations.opentelemetry`.
 - The integration-specific content of the `sampling_context` argument of `traces_sampler` and `profiles_sampler` now looks different.
 - The Celery integration doesn't add the `celery_job` dictionary anymore. Instead, the individual keys are now available as:

@@ -137,6 +138,7 @@ Looking to upgrade from Sentry SDK 2.x to 3.x? Here's a comprehensive list of wh
 - The `enable_tracing` `init` option has been removed. Configure `traces_sample_rate` directly.
 - The `propagate_traces` `init` option has been removed. Use `trace_propagation_targets` instead.
 - The `custom_sampling_context` parameter of `start_transaction` has been removed. Use `attributes` instead to set key-value pairs of data that should be accessible in the traces sampler. Note that span attributes need to conform to the [OpenTelemetry specification](https://opentelemetry.io/docs/concepts/signals/traces/#attributes), meaning only certain types can be set as values.
+- `set_measurement` has been removed.
 - The PyMongo integration no longer sets tags. The data is still accessible via span attributes.
 - The PyMongo integration doesn't set `operation_ids` anymore. The individual IDs (`operation_id`, `request_id`, `session_id`) are now accessible as separate span attributes.
 - `sentry_sdk.metrics` and associated metrics APIs have been removed as Sentry no longer accepts metrics data in this form. See https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics
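
Read in isolation, the two additions in this hunk map to roughly the following 2.x to 3.x edits. This is a minimal, unverified sketch; the `attributes` behavior is as described by the guide entry above, not checked against the final API:

```python
# Sketch of the migrations this hunk documents (assumptions noted inline).

# Import path change for the OpenTelemetry glue code:
# 2.x: from sentry_sdk.integrations.opentelemetry import SentrySpanProcessor, SentryPropagator
from sentry_sdk.opentelemetry import SentrySpanProcessor, SentryPropagator  # 3.x

import sentry_sdk

# custom_sampling_context is gone from start_transaction; pass `attributes`
# instead. They are visible to traces_sampler and must use OpenTelemetry-
# compatible value types (str, bool, int, float, or homogeneous sequences).
with sentry_sdk.start_transaction(name="checkout", attributes={"plan": "pro"}):
    ...
```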

sentry_sdk/__init__.py (-1)

@@ -36,7 +36,6 @@
     "set_context",
     "set_extra",
     "set_level",
-    "set_measurement",
     "set_tag",
     "set_tags",
     "set_user",

sentry_sdk/_types.py (-41)

@@ -107,7 +107,6 @@ def substituted_because_contains_sensitive_data(cls):
     from typing import Callable
     from typing import Dict
     from typing import Mapping
-    from typing import NotRequired
     from typing import Optional
     from typing import Type
     from typing_extensions import Literal, TypedDict
@@ -120,45 +119,6 @@ class SDKInfo(TypedDict):
     # "critical" is an alias of "fatal" recognized by Relay
     LogLevelStr = Literal["fatal", "critical", "error", "warning", "info", "debug"]
 
-    DurationUnit = Literal[
-        "nanosecond",
-        "microsecond",
-        "millisecond",
-        "second",
-        "minute",
-        "hour",
-        "day",
-        "week",
-    ]
-
-    InformationUnit = Literal[
-        "bit",
-        "byte",
-        "kilobyte",
-        "kibibyte",
-        "megabyte",
-        "mebibyte",
-        "gigabyte",
-        "gibibyte",
-        "terabyte",
-        "tebibyte",
-        "petabyte",
-        "pebibyte",
-        "exabyte",
-        "exbibyte",
-    ]
-
-    FractionUnit = Literal["ratio", "percent"]
-    MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str]
-
-    MeasurementValue = TypedDict(
-        "MeasurementValue",
-        {
-            "value": float,
-            "unit": NotRequired[Optional[MeasurementUnit]],
-        },
-    )
-
     Event = TypedDict(
         "Event",
         {
@@ -180,7 +140,6 @@ class SDKInfo(TypedDict):
             "level": LogLevelStr,
             "logentry": Mapping[str, object],
             "logger": str,
-            "measurements": dict[str, MeasurementValue],
             "message": str,
             "modules": dict[str, str],
             "monitor_config": Mapping[str, object],

sentry_sdk/ai/monitoring.py (+3 -3)

@@ -106,14 +106,14 @@ def record_token_usage(
     if ai_pipeline_name:
         span.set_attribute("ai.pipeline.name", ai_pipeline_name)
     if prompt_tokens is not None:
-        span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens)
+        span.set_attribute("ai.prompt_tokens.used", prompt_tokens)
     if completion_tokens is not None:
-        span.set_measurement("ai_completion_tokens_used", value=completion_tokens)
+        span.set_attribute("ai.completion_tokens.used", completion_tokens)
     if (
         total_tokens is None
         and prompt_tokens is not None
         and completion_tokens is not None
     ):
         total_tokens = prompt_tokens + completion_tokens
     if total_tokens is not None:
-        span.set_measurement("ai_total_tokens_used", total_tokens)
+        span.set_attribute("ai.total_tokens.used", total_tokens)

sentry_sdk/api.py (-8)

@@ -59,7 +59,6 @@
     "set_context",
     "set_extra",
     "set_level",
-    "set_measurement",
     "set_tag",
     "set_tags",
     "set_user",
@@ -287,13 +286,6 @@ def start_transaction(
     )
 
 
-def set_measurement(name, value, unit=""):
-    # type: (str, float, sentry_sdk._types.MeasurementUnit) -> None
-    transaction = get_current_scope().root_span
-    if transaction is not None:
-        transaction.set_measurement(name, value, unit)
-
-
 def get_current_span(scope=None):
     # type: (Optional[Scope]) -> Optional[sentry_sdk.tracing.Span]
     """

sentry_sdk/opentelemetry/consts.py (-1)

@@ -26,7 +26,6 @@ class SentrySpanAttribute:
     DESCRIPTION = "sentry.description"
     OP = "sentry.op"
     ORIGIN = "sentry.origin"
-    MEASUREMENT = "sentry.measurement"
     TAG = "sentry.tag"
     NAME = "sentry.name"
     SOURCE = "sentry.source"

sentry_sdk/opentelemetry/span_processor.py (-4)

@@ -308,10 +308,6 @@ def _common_span_transaction_attributes_as_json(self, span):
             "timestamp": convert_from_otel_timestamp(span.end_time),
         }  # type: Event
 
-        measurements = extract_span_attributes(span, SentrySpanAttribute.MEASUREMENT)
-        if measurements:
-            common_json["measurements"] = measurements
-
         tags = extract_span_attributes(span, SentrySpanAttribute.TAG)
         if tags:
             common_json["tags"] = tags

sentry_sdk/opentelemetry/utils.py (+1 -9)

@@ -309,15 +309,7 @@ def extract_span_attributes(span, namespace):
     for attr, value in (span.attributes or {}).items():
         if attr.startswith(namespace):
             key = attr[len(namespace) + 1 :]
-
-            if namespace == SentrySpanAttribute.MEASUREMENT:
-                value = cast("tuple[str, str]", value)
-                extracted_attrs[key] = {
-                    "value": float(value[0]),
-                    "unit": value[1],
-                }
-            else:
-                extracted_attrs[key] = value
+            extracted_attrs[key] = value
 
     return extracted_attrs
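
With the `MEASUREMENT` branch removed, `extract_span_attributes` reduces to a namespace-prefix filter. A self-contained sketch of that remaining behavior, operating on a plain dict instead of an OTel span purely for illustration:

```python
def extract_prefixed(attributes, namespace):
    # Keep attributes under "<namespace>." and strip that prefix from the key;
    # values pass through untouched (no more measurement value/unit unpacking).
    extracted = {}
    for attr, value in (attributes or {}).items():
        if attr.startswith(namespace):
            extracted[attr[len(namespace) + 1:]] = value
    return extracted

# {"sentry.tag.env": "prod", "other": 1} with "sentry.tag" -> {"env": "prod"}
print(extract_prefixed({"sentry.tag.env": "prod", "other": 1}, "sentry.tag"))
```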

sentry_sdk/tracing.py (-12)

@@ -65,7 +65,6 @@
     R = TypeVar("R")
 
     from sentry_sdk._types import (
-        MeasurementUnit,
         SamplingContext,
     )
 
@@ -150,10 +149,6 @@ def finish(
         # type: (...) -> None
         pass
 
-    def set_measurement(self, name, value, unit=""):
-        # type: (str, float, MeasurementUnit) -> None
-        pass
-
     def set_context(self, key, value):
         # type: (str, dict[str, Any]) -> None
         pass
@@ -540,13 +535,6 @@ def set_status(self, status):
         else:
             self._otel_span.set_status(Status(otel_status, otel_description))
 
-    def set_measurement(self, name, value, unit=""):
-        # type: (str, float, MeasurementUnit) -> None
-        # Stringify value here since OTel expects all seq items to be of one type
-        self.set_attribute(
-            f"{SentrySpanAttribute.MEASUREMENT}.{name}", (str(value), unit)
-        )
-
     def set_thread(self, thread_id, thread_name):
         # type: (Optional[int], Optional[str]) -> None
         if thread_id is not None:

tests/integrations/anthropic/test_anthropic.py (+21 -26)

@@ -127,9 +127,9 @@ def test_nonstreaming_create_message(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
     assert span["data"]["ai.streaming"] is False
 
 
@@ -197,9 +197,9 @@ async def test_nonstreaming_create_message_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
     assert span["data"]["ai.streaming"] is False
 
 
@@ -299,9 +299,9 @@ def test_streaming_create_message(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 30
+    assert span["data"]["ai.total_tokens.used"] == 40
     assert span["data"]["ai.streaming"] is True
 
 
@@ -404,9 +404,9 @@ async def test_streaming_create_message_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
        assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 30
+    assert span["data"]["ai.total_tokens.used"] == 40
     assert span["data"]["ai.streaming"] is True
 
 
@@ -536,9 +536,9 @@ def test_streaming_create_message_with_input_json_delta(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
+    assert span["data"]["ai.prompt_tokens.used"] == 366
+    assert span["data"]["ai.completion_tokens.used"] == 51
+    assert span["data"]["ai.total_tokens.used"] == 417
     assert span["data"]["ai.streaming"] is True
 
 
@@ -675,9 +675,9 @@ async def test_streaming_create_message_with_input_json_delta_async(
         assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
         assert SPANDATA.AI_RESPONSES not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
+    assert span["data"]["ai.prompt_tokens.used"] == 366
+    assert span["data"]["ai.completion_tokens.used"] == 51
+    assert span["data"]["ai.total_tokens.used"] == 417
     assert span["data"]["ai.streaming"] is True
 
 
@@ -822,11 +822,6 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init, capture_events):
         content_blocks=["{'test': 'data',", "'more': 'json'}"],
     )
 
-    # assert span._data.get("ai.streaming") is True
-    # assert span._measurements.get("ai_prompt_tokens_used")["value"] == 10
-    # assert span._measurements.get("ai_completion_tokens_used")["value"] == 20
-    # assert span._measurements.get("ai_total_tokens_used")["value"] == 30
-
     (event,) = events
 
     assert len(event["spans"]) == 1
@@ -836,6 +831,6 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init, capture_events):
         [{"type": "text", "text": "{'test': 'data','more': 'json'}"}]
     )
     assert span["data"]["ai.streaming"] is True
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.completion_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30

tests/integrations/cohere/test_cohere.py (+8 -8)

@@ -64,9 +64,9 @@ def test_nonstreaming_chat(
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]
 
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
 
 
 # noinspection PyTypeChecker
@@ -136,9 +136,9 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]
 
-    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.completion_tokens.used"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 20
+    assert span["data"]["ai.total_tokens.used"] == 30
 
 
 def test_bad_chat(sentry_init, capture_events):
@@ -200,8 +200,8 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts):
     else:
         assert "ai.input_messages" not in span["data"]
 
-    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
-    assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
+    assert span["data"]["ai.prompt_tokens.used"] == 10
+    assert span["data"]["ai.total_tokens.used"] == 10
 
 
 def test_span_origin_chat(sentry_init, capture_events):

tests/integrations/huggingface_hub/test_huggingface_hub.py (+2 -2)

@@ -74,7 +74,7 @@ def test_nonstreaming_chat_completion(
         assert "ai.responses" not in span["data"]
 
     if details_arg:
-        assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
+        assert span["data"]["ai.total_tokens.used"] == 10
 
 
 @pytest.mark.parametrize(
@@ -133,7 +133,7 @@ def test_streaming_chat_completion(
         assert "ai.responses" not in span["data"]
 
     if details_arg:
-        assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
+        assert span["data"]["ai.total_tokens.used"] == 10
 
 
 def test_bad_chat_completion(sentry_init, capture_events):

tests/integrations/langchain/test_langchain.py (+4 -3)

@@ -179,12 +179,13 @@ def test_langchain_agent(
     assert len(list(x for x in tx["spans"] if x["op"] == "ai.run.langchain")) > 0
 
     if use_unknown_llm_type:
-        assert "ai_prompt_tokens_used" in chat_spans[0]["measurements"]
-        assert "ai_total_tokens_used" in chat_spans[0]["measurements"]
+        assert "ai.prompt_tokens.used" in chat_spans[0]["data"]
+        assert "ai.total_tokens.used" in chat_spans[0]["data"]
     else:
         # important: to avoid double counting, we do *not* measure
         # tokens used if we have an explicit integration (e.g. OpenAI)
-        assert "measurements" not in chat_spans[0]
+        assert "ai.prompt_tokens.used" not in chat_spans[0]["data"]
+        assert "ai.total_tokens.used" not in chat_spans[0]["data"]
 
     if send_default_pii and include_prompts:
         assert "You are very powerful" in chat_spans[0]["data"]["ai.input_messages"]
