Commit 9c959c8

Alexei Starovoitov authored and Ingo Molnar committed

tracing: Allow BPF programs to call bpf_trace_printk()
Debugging of BPF programs needs some form of printk from the program, so let programs call a limited trace_printk() with %d %u %x %p modifiers only.

Similar to kernel modules, during program load the verifier checks whether the program calls bpf_trace_printk() and, if so, the kernel allocates trace_printk buffers and emits a big 'this is debug only' banner.

Signed-off-by: Alexei Starovoitov <[email protected]>
Reviewed-by: Steven Rostedt <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent d9847d3 commit 9c959c8

2 files changed, 79 insertions(+), 0 deletions(-)

include/uapi/linux/bpf.h (+1)

@@ -166,6 +166,7 @@ enum bpf_func_id {
 	BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
 	BPF_FUNC_probe_read,      /* int bpf_probe_read(void *dst, int size, void *src) */
 	BPF_FUNC_ktime_get_ns,    /* u64 bpf_ktime_get_ns(void) */
+	BPF_FUNC_trace_printk,    /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
 	__BPF_FUNC_MAX_ID,
 };
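On the program side, the uapi change above is all that is exposed: a program reaches the helper by calling function ID BPF_FUNC_trace_printk with the signature given in the comment. A minimal sketch of how a restricted-C program binds to that ID, in the style of the in-tree samples (the cast-to-function-pointer pattern here is illustrative, not part of this commit):

	/* Hypothetical program-side binding: cast the helper ID to a function
	 * pointer matching the signature documented in the uapi comment above.
	 */
	static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
		(void *) BPF_FUNC_trace_printk;

At load time the verifier resolves that ID to the bpf_trace_printk_proto added in kernel/trace/bpf_trace.c below.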
kernel/trace/bpf_trace.c (+78)

@@ -10,6 +10,7 @@
 #include <linux/bpf.h>
 #include <linux/filter.h>
 #include <linux/uaccess.h>
+#include <linux/ctype.h>
 #include "trace.h"
 
 static DEFINE_PER_CPU(int, bpf_prog_active);
@@ -90,6 +91,74 @@ static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
 	.ret_type	= RET_INTEGER,
 };
 
+/*
+ * limited trace_printk()
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ */
+static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+{
+	char *fmt = (char *) (long) r1;
+	int mod[3] = {};
+	int fmt_cnt = 0;
+	int i;
+
+	/*
+	 * bpf_check()->check_func_arg()->check_stack_boundary()
+	 * guarantees that fmt points to bpf program stack,
+	 * fmt_size bytes of it were initialized and fmt_size > 0
+	 */
+	if (fmt[--fmt_size] != 0)
+		return -EINVAL;
+
+	/* check format string for allowed specifiers */
+	for (i = 0; i < fmt_size; i++) {
+		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
+			return -EINVAL;
+
+		if (fmt[i] != '%')
+			continue;
+
+		if (fmt_cnt >= 3)
+			return -EINVAL;
+
+		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
+		i++;
+		if (fmt[i] == 'l') {
+			mod[fmt_cnt]++;
+			i++;
+		} else if (fmt[i] == 'p') {
+			mod[fmt_cnt]++;
+			i++;
+			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+				return -EINVAL;
+			fmt_cnt++;
+			continue;
+		}
+
+		if (fmt[i] == 'l') {
+			mod[fmt_cnt]++;
+			i++;
+		}
+
+		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
+			return -EINVAL;
+		fmt_cnt++;
+	}
+
+	return __trace_printk(1/* fake ip will not be printed */, fmt,
+			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
+			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
+			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+}
+
+static const struct bpf_func_proto bpf_trace_printk_proto = {
+	.func		= bpf_trace_printk,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_STACK,
+	.arg2_type	= ARG_CONST_STACK_SIZE,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -103,6 +172,15 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 		return &bpf_probe_read_proto;
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
+
+	case BPF_FUNC_trace_printk:
+		/*
+		 * this program might be calling bpf_trace_printk,
+		 * so allocate per-cpu printk buffers
+		 */
+		trace_printk_init_buffers();
+
+		return &bpf_trace_printk_proto;
 	default:
 		return NULL;
 	}
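
Putting both hunks together, a minimal kprobe program exercising the helper might look like the sketch below. It is illustrative only and not part of this commit: the probed symbol, section names and the helper binding are assumptions in the style of samples/bpf; what is grounded in the diff is the calling convention (format string plus its size taken from the program stack, at most three extra arguments) and the GPL requirement implied by .gpl_only = true.

	#include <linux/ptrace.h>
	#include <uapi/linux/bpf.h>

	/* illustrative program-side binding, as sketched above */
	static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
		(void *) BPF_FUNC_trace_printk;

	/* hypothetical probe on sys_write(); the "kprobe/..." section name is a
	 * loader convention, not something this commit defines
	 */
	__attribute__((section("kprobe/sys_write"), used))
	int trace_write(struct pt_regs *ctx)
	{
		/* fmt lives on the BPF stack, trailing NUL included, which is what
		 * ARG_PTR_TO_STACK / ARG_CONST_STACK_SIZE let the verifier check
		 */
		char fmt[] = "sys_write: ctx %p id %d\n";

		/* at most three conversions, only %d %u %x (with l/ll) and %p;
		 * %s and friends are rejected by the format checker above
		 */
		bpf_trace_printk(fmt, sizeof(fmt), ctx, 1);
		return 0;
	}

	/* the helper is gpl_only, so the program must carry a GPL license */
	__attribute__((section("license"), used))
	char _license[] = "GPL";

The output lands in the trace_printk ring buffer (readable via trace_pipe), alongside the 'this is debug only' banner that trace_printk_init_buffers() emits when such a program is loaded.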
