Skip to content

Commit aa2e93b

Browse files
Brian Vazquez authored; Alexei Starovoitov committed
bpf: Add generic support for update and delete batch ops
This commit adds generic support for update and delete batch ops that can be used for almost all the bpf maps. These commands share the same UAPI attr that lookup and lookup_and_delete batch ops use and the syscall commands are: BPF_MAP_UPDATE_BATCH BPF_MAP_DELETE_BATCH The main difference between update/delete and lookup batch ops is that for update/delete, keys/values must be specified by userspace and, because of that, neither in_batch nor out_batch are used. Suggested-by: Stanislav Fomichev <[email protected]> Signed-off-by: Brian Vazquez <[email protected]> Signed-off-by: Yonghong Song <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Link: https://lore.kernel.org/bpf/[email protected]
1 parent cb4d03a commit aa2e93b

File tree

3 files changed

+127
-0
lines changed

3 files changed

+127
-0
lines changed

include/linux/bpf.h

+10
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,10 @@ struct bpf_map_ops {
4646
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
4747
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
4848
union bpf_attr __user *uattr);
49+
int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
50+
union bpf_attr __user *uattr);
51+
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
52+
union bpf_attr __user *uattr);
4953

5054
/* funcs callable from userspace and from eBPF programs */
5155
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -987,6 +991,12 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
987991
int generic_map_lookup_batch(struct bpf_map *map,
988992
const union bpf_attr *attr,
989993
union bpf_attr __user *uattr);
994+
int generic_map_update_batch(struct bpf_map *map,
995+
const union bpf_attr *attr,
996+
union bpf_attr __user *uattr);
997+
int generic_map_delete_batch(struct bpf_map *map,
998+
const union bpf_attr *attr,
999+
union bpf_attr __user *uattr);
9901000

9911001
extern int sysctl_unprivileged_bpf_disabled;
9921002

include/uapi/linux/bpf.h

+2
Original file line numberDiff line numberDiff line change
@@ -108,6 +108,8 @@ enum bpf_cmd {
108108
BPF_MAP_FREEZE,
109109
BPF_BTF_GET_NEXT_ID,
110110
BPF_MAP_LOOKUP_BATCH,
111+
BPF_MAP_UPDATE_BATCH,
112+
BPF_MAP_DELETE_BATCH,
111113
};
112114

113115
enum bpf_map_type {

kernel/bpf/syscall.c

+115
Original file line numberDiff line numberDiff line change
@@ -1218,6 +1218,111 @@ static int map_get_next_key(union bpf_attr *attr)
12181218
return err;
12191219
}
12201220

1221+
int generic_map_delete_batch(struct bpf_map *map,
1222+
const union bpf_attr *attr,
1223+
union bpf_attr __user *uattr)
1224+
{
1225+
void __user *keys = u64_to_user_ptr(attr->batch.keys);
1226+
u32 cp, max_count;
1227+
int err = 0;
1228+
void *key;
1229+
1230+
if (attr->batch.elem_flags & ~BPF_F_LOCK)
1231+
return -EINVAL;
1232+
1233+
if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1234+
!map_value_has_spin_lock(map)) {
1235+
return -EINVAL;
1236+
}
1237+
1238+
max_count = attr->batch.count;
1239+
if (!max_count)
1240+
return 0;
1241+
1242+
for (cp = 0; cp < max_count; cp++) {
1243+
key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
1244+
if (IS_ERR(key)) {
1245+
err = PTR_ERR(key);
1246+
break;
1247+
}
1248+
1249+
if (bpf_map_is_dev_bound(map)) {
1250+
err = bpf_map_offload_delete_elem(map, key);
1251+
break;
1252+
}
1253+
1254+
preempt_disable();
1255+
__this_cpu_inc(bpf_prog_active);
1256+
rcu_read_lock();
1257+
err = map->ops->map_delete_elem(map, key);
1258+
rcu_read_unlock();
1259+
__this_cpu_dec(bpf_prog_active);
1260+
preempt_enable();
1261+
maybe_wait_bpf_programs(map);
1262+
if (err)
1263+
break;
1264+
}
1265+
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1266+
err = -EFAULT;
1267+
return err;
1268+
}
1269+
1270+
int generic_map_update_batch(struct bpf_map *map,
1271+
const union bpf_attr *attr,
1272+
union bpf_attr __user *uattr)
1273+
{
1274+
void __user *values = u64_to_user_ptr(attr->batch.values);
1275+
void __user *keys = u64_to_user_ptr(attr->batch.keys);
1276+
u32 value_size, cp, max_count;
1277+
int ufd = attr->map_fd;
1278+
void *key, *value;
1279+
struct fd f;
1280+
int err = 0;
1281+
1282+
f = fdget(ufd);
1283+
if (attr->batch.elem_flags & ~BPF_F_LOCK)
1284+
return -EINVAL;
1285+
1286+
if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1287+
!map_value_has_spin_lock(map)) {
1288+
return -EINVAL;
1289+
}
1290+
1291+
value_size = bpf_map_value_size(map);
1292+
1293+
max_count = attr->batch.count;
1294+
if (!max_count)
1295+
return 0;
1296+
1297+
value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1298+
if (!value)
1299+
return -ENOMEM;
1300+
1301+
for (cp = 0; cp < max_count; cp++) {
1302+
key = __bpf_copy_key(keys + cp * map->key_size, map->key_size);
1303+
if (IS_ERR(key)) {
1304+
err = PTR_ERR(key);
1305+
break;
1306+
}
1307+
err = -EFAULT;
1308+
if (copy_from_user(value, values + cp * value_size, value_size))
1309+
break;
1310+
1311+
err = bpf_map_update_value(map, f, key, value,
1312+
attr->batch.elem_flags);
1313+
1314+
if (err)
1315+
break;
1316+
}
1317+
1318+
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1319+
err = -EFAULT;
1320+
1321+
kfree(value);
1322+
kfree(key);
1323+
return err;
1324+
}
1325+
12211326
#define MAP_LOOKUP_RETRIES 3
12221327

12231328
int generic_map_lookup_batch(struct bpf_map *map,
@@ -3219,6 +3324,10 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
32193324

32203325
if (cmd == BPF_MAP_LOOKUP_BATCH)
32213326
BPF_DO_BATCH(map->ops->map_lookup_batch);
3327+
else if (cmd == BPF_MAP_UPDATE_BATCH)
3328+
BPF_DO_BATCH(map->ops->map_update_batch);
3329+
else
3330+
BPF_DO_BATCH(map->ops->map_delete_batch);
32223331

32233332
err_put:
32243333
fdput(f);
@@ -3325,6 +3434,12 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
33253434
case BPF_MAP_LOOKUP_BATCH:
33263435
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
33273436
break;
3437+
case BPF_MAP_UPDATE_BATCH:
3438+
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
3439+
break;
3440+
case BPF_MAP_DELETE_BATCH:
3441+
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
3442+
break;
33283443
default:
33293444
err = -EINVAL;
33303445
break;

0 commit comments

Comments
 (0)