10
10
#include <linux/bpf.h>
11
11
#include <linux/filter.h>
12
12
#include <linux/uaccess.h>
13
+ #include <linux/ctype.h>
13
14
#include "trace.h"
14
15
15
16
static DEFINE_PER_CPU (int , bpf_prog_active ) ;
@@ -90,6 +91,74 @@ static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
90
91
.ret_type = RET_INTEGER ,
91
92
};
92
93
94
/*
 * limited trace_printk()
 * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
 *
 * r1 points to the format string on the bpf program stack, fmt_size is its
 * size including the terminating NUL, r3-r5 are up to three values to print.
 * Returns the number of bytes written by __trace_printk(), or -EINVAL if the
 * format string contains anything outside the whitelist above.
 */
static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
{
	char *fmt = (char *) (long) r1;
	/*
	 * width class per argument: 0 = int (u32), 1 = long, 2 = long long.
	 * '%p' also counts as one 'l' (pointer-sized, i.e. long).
	 */
	int mod[3] = {};
	int fmt_cnt = 0;
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		/* reject non-printable or non-ASCII bytes anywhere in fmt */
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		/* at most 3 conversions; note this also rejects '%%' */
		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p') {
			mod[fmt_cnt]++;
			i++;
			/*
			 * bare '%p' only: it must be followed by whitespace,
			 * punctuation, or end of string, so printk's extended
			 * '%p<X>' pointer formats cannot be reached
			 */
			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
				return -EINVAL;
			fmt_cnt++;
			continue;
		}

		/* second 'l' of a 'll' prefix */
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

	/*
	 * always pass three args; each is narrowed to the width the format
	 * string declared for it so varargs promotion matches the specifier
	 */
	return __trace_printk(1/* fake ip will not be printed */, fmt,
			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
}
153
+
154
/*
 * Helper prototype for BPF_FUNC_trace_printk: the verifier uses arg1/arg2
 * to prove fmt lies on the program stack with a known constant size.
 * GPL-only because it exposes trace_printk() output.
 */
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_STACK,
	.arg2_type	= ARG_CONST_STACK_SIZE,
};
161
+
93
162
static const struct bpf_func_proto * kprobe_prog_func_proto (enum bpf_func_id func_id )
94
163
{
95
164
switch (func_id ) {
@@ -103,6 +172,15 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
103
172
return & bpf_probe_read_proto ;
104
173
case BPF_FUNC_ktime_get_ns :
105
174
return & bpf_ktime_get_ns_proto ;
175
+
176
+ case BPF_FUNC_trace_printk :
177
+ /*
178
+ * this program might be calling bpf_trace_printk,
179
+ * so allocate per-cpu printk buffers
180
+ */
181
+ trace_printk_init_buffers ();
182
+
183
+ return & bpf_trace_printk_proto ;
106
184
default :
107
185
return NULL ;
108
186
}
0 commit comments