// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

#include "arguments.h"
#include "go_context.h"
#include "go_types.h"
#include "trace/span_context.h"
#include "trace/start_span.h"
#include "trace/span_output.h"

char __license[] SEC("license") = "Dual MIT/GPL";

#define MAX_CONCURRENT 50
// TODO: tune this. This is just a guess, but we should be able to determine
// the maximum size of a span (based on limits) and set this accordingly.
// Ideally, we could also look into a tiered allocation strategy so we do
// not over-allocate space (i.e. small, medium, and large data sizes).
#define MAX_SIZE 2048

// Injected constants: byte offsets of fields within the user-space
// trace.SpanContext struct.
volatile const u64 span_context_trace_id_pos;
volatile const u64 span_context_span_id_pos;
volatile const u64 span_context_trace_flags_pos;

// Records whether the auto-instrumentation enabled flag has been written.
bool wrote_flag = false;

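// A control message sent over the shared perf buffer. Its kind value (1)
// distinguishes it from span events (kind 0, struct event_t below) so the
// userspace reader can tell the two apart.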
struct control_t {
    u64 kind; // Required to be 1.
};

struct otel_span_t {
    struct span_context sc;
    struct span_context psc;
};

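// In-flight spans, keyed by the *autoSpan pointer passed to the probed
// start and ended methods.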
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __type(key, void*);
    __type(value, struct otel_span_t);
    __uint(max_entries, MAX_CONCURRENT);
} active_spans_by_span_ptr SEC(".maps");

struct event_t {
    u64 kind; // Required to be 0.
    u32 size;
    char data[MAX_SIZE];
};

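// Scratch space for building an event. A struct event_t is larger than the
// 512-byte BPF stack limit, so it is staged in this single-entry per-CPU
// array instead of on the stack.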
struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(key_size, sizeof(u32));
    __uint(value_size, sizeof(struct event_t));
    __uint(max_entries, 1);
} new_event SEC(".maps");

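// write_span_context copies the trace ID, span ID, and trace flags of sc
// into the user-space trace.SpanContext pointed to by go_sc, using the
// injected field offsets.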
static __always_inline long write_span_context(void *go_sc, struct span_context *sc) {
    if (go_sc == NULL) {
        bpf_printk("write_span_context: NULL go_sc");
        return -1;
    }

    void *tid = (void *)(go_sc + span_context_trace_id_pos);
    long ret = bpf_probe_write_user(tid, &sc->TraceID, TRACE_ID_SIZE);
    if (ret != 0) {
        bpf_printk("write_span_context: failed to write trace ID: %ld", ret);
        return -2;
    }

    void *sid = (void *)(go_sc + span_context_span_id_pos);
    ret = bpf_probe_write_user(sid, &sc->SpanID, SPAN_ID_SIZE);
    if (ret != 0) {
        bpf_printk("write_span_context: failed to write span ID: %ld", ret);
        return -3;
    }

    void *flags = (void *)(go_sc + span_context_trace_flags_pos);
    ret = bpf_probe_write_user(flags, &sc->TraceFlags, TRACE_FLAGS_SIZE);
    if (ret != 0) {
        bpf_printk("write_span_context: failed to write trace flags: %ld", ret);
        return -4;
    }

    return 0;
}

// This instrumentation attaches a uprobe to the following function:
// func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider
// https://github.com/open-telemetry/opentelemetry-go/blob/2e8d5a99340b1e11ca6b19bcdfcbfe9cd0c2c385/trace/noop.go#L98C1-L98C65
SEC("uprobe/tracerProvider")
int uprobe_tracerProvider(struct pt_regs *ctx) {
    if (wrote_flag) {
        // Already wrote the flag value.
        return 0;
    }

    void *flag_ptr = get_argument(ctx, 3);
    if (flag_ptr == NULL) {
        bpf_printk("invalid flag_ptr: NULL");
        return -1;
    }

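    // bpf_probe_write_user writes directly into the traced process's
    // memory; it only succeeds if the destination page is mapped and
    // writable.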
    bool true_value = true;
    long res = bpf_probe_write_user(flag_ptr, &true_value, sizeof(bool));
    if (res != 0) {
        bpf_printk("failed to write bool flag value: %ld", res);
        return -2;
    }

    wrote_flag = true;

    // Signal that this uprobe should be unloaded.
    struct control_t ctrl = {1};
    return bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, (void *)(&ctrl), sizeof(struct control_t));
}

// This instrumentation attaches a uprobe to the following function:
// func (t *autoTracer) start(ctx context.Context, spanPtr *autoSpan, psc *SpanContext, sampled *bool, sc *SpanContext)
// https://github.com/open-telemetry/opentelemetry-go/blob/2e8d5a99340b1e11ca6b19bcdfcbfe9cd0c2c385/trace/auto.go#L81-L92
SEC("uprobe/Tracer_start")
int uprobe_Tracer_start(struct pt_regs *ctx) {
    struct go_iface go_context = {0};
    get_Go_context(ctx, 2, 0, true, &go_context);

    struct otel_span_t otel_span;
    __builtin_memset(&otel_span, 0, sizeof(struct otel_span_t));

    start_span_params_t params = {
        .ctx = ctx,
        .go_context = &go_context,
        .psc = &otel_span.psc,
        .sc = &otel_span.sc,
        .get_parent_span_context_fn = NULL,
        .get_parent_span_context_arg = NULL, // Default to new root.
    };

    start_span(&params);

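    // Argument positions follow Go's register-based calling convention for
    // the probed signature: the receiver is argument 1, the ctx interface
    // occupies 2-3, spanPtr is 4, psc is 5, sampled is 6, and sc is 7.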
    void *parent_span_context = get_argument(ctx, 5);
    long rc = write_span_context(parent_span_context, &otel_span.psc);
    if (rc != 0) {
        bpf_printk("failed to write parent span context: %ld", rc);
    }

    if (!is_sampled(params.sc)) {
        // Default SDK behaviour is to sample everything. Only set the
        // sampled value to false when needed.
        void *sampled_ptr_val = get_argument(ctx, 6);
        if (sampled_ptr_val == NULL) {
            bpf_printk("nil sampled pointer");
        } else {
            bool false_val = false;
            rc = bpf_probe_write_user(sampled_ptr_val, &false_val, sizeof(bool));
            if (rc != 0) {
                bpf_printk("bpf_probe_write_user: failed to write sampled value: %ld", rc);
            } else {
                bpf_printk("wrote sampled value");
            }
        }
    }

    void *span_context_ptr_val = get_argument(ctx, 7);
    rc = write_span_context(span_context_ptr_val, &otel_span.sc);
    if (rc != 0) {
        bpf_printk("failed to write span context: %ld", rc);
    }

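    // Index the span by its *autoSpan pointer so uprobe_Span_ended can find
    // it, and track the new span context against the Go context so child
    // spans can discover their parent.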
    void *span_ptr_val = get_argument(ctx, 4);
    bpf_map_update_elem(&active_spans_by_span_ptr, &span_ptr_val, &otel_span, 0);
    start_tracking_span(go_context.data, &otel_span.sc);

    return 0;
}

// This instrumentation attaches a uprobe to the following function:
// func (*autoSpan) ended(buf []byte) {}
// https://github.com/open-telemetry/opentelemetry-go/blob/2e8d5a99340b1e11ca6b19bcdfcbfe9cd0c2c385/trace/auto.go#L435-L448
SEC("uprobe/Span_ended")
int uprobe_Span_ended(struct pt_regs *ctx) {
    void *span_ptr = get_argument(ctx, 1);
    struct otel_span_t *span = bpf_map_lookup_elem(&active_spans_by_span_ptr, &span_ptr);
    if (span == NULL) {
        return 0;
    }
    bool sampled = is_sampled(&span->sc);
    stop_tracking_span(&span->sc, &span->psc);
    bpf_map_delete_elem(&active_spans_by_span_ptr, &span_ptr);

    // Do not output un-sampled span data.
    if (!sampled) return 0;

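    // buf is a Go slice: its data pointer is passed in argument 2 and its
    // length in argument 3.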
    u64 len = (u64)get_argument(ctx, 3);
    if (len > MAX_SIZE) {
        bpf_printk("span data too large: %llu", len);
        return -1;
    }
    if (len == 0) {
        bpf_printk("empty span data");
        return 0;
    }

    void *data_ptr = get_argument(ctx, 2);
    if (data_ptr == NULL) {
        bpf_printk("nil span data pointer");
        return 0;
    }

    u32 key = 0;
    struct event_t *event = bpf_map_lookup_elem(&new_event, &key);
    if (event == NULL) {
        bpf_printk("failed to initialize new event");
        return -2;
    }
    event->size = (u32)len;

    // Re-check the bound after the round-trip through the map so the
    // verifier can prove the read stays within event->data.
    if (event->size <= MAX_SIZE) {
        long rc = bpf_probe_read(&event->data, event->size, data_ptr);
        if (rc < 0) {
            bpf_printk("failed to read encoded span data");
            return -3;
        }
    } else {
        bpf_printk("read too large: %u", event->size);
        return -4;
    }


    // Only send the populated prefix of the event, not the whole data buffer.
    u64 size = sizeof(event->kind) + sizeof(event->size) + event->size;
    // Make the verifier happy: ensure there is no unbounded memory access.
    if (size < sizeof(struct event_t) + 1) {
        return bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, event, size);
    }
    bpf_printk("write too large: %u", event->size);
    return -5;
}