Skip to content

Commit 5752b86

Browse files
committed
BPF task work WIP
Signed-off-by: Mykyta Yatsenko <[email protected]>
1 parent e41079f commit 5752b86

File tree

6 files changed

+189
-12
lines changed

6 files changed

+189
-12
lines changed

include/uapi/linux/bpf.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7375,6 +7375,10 @@ struct bpf_timer {
73757375
__u64 __opaque[2];
73767376
} __attribute__((aligned(8)));
73777377

7378+
/* Opaque handle for scheduling BPF callbacks via task_work; storage is
 * managed by the kernel, programs must not touch the contents directly.
 */
struct bpf_task_work {
	__u64 __opaque[2];
} __attribute__((aligned(8)));
7381+
73787382
struct bpf_wq {
73797383
__u64 __opaque[2];
73807384
} __attribute__((aligned(8)));

kernel/bpf/helpers.c

Lines changed: 57 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
#include <linux/bpf_mem_alloc.h>
2525
#include <linux/kasan.h>
2626
#include <linux/bpf_verifier.h>
27-
27+
#include <linux/task_work.h>
2828
#include "../../lib/kstrtox.h"
2929

3030
/* If kernel subsystem is allowing eBPF programs to call this function,
@@ -1141,6 +1141,61 @@ enum bpf_async_type {
11411141
BPF_ASYNC_TYPE_WQ,
11421142
};
11431143

1144+
/* Deferred-callback bookkeeping for the task_work-based kfunc (WIP).
 * 'work' is the callback_head handed to task_work_add(); 'map' and
 * 'callback_fn' identify what to run when the work fires.
 */
struct bpf_defer {
	struct bpf_map *map;
	bpf_callback_t callback_fn;
	struct callback_head work;
};

/* Wrapper stored inside the map value's struct bpf_task_work slot;
 * aligned(8) to match the uapi struct bpf_task_work layout.
 */
struct bpf_defer_work {
	struct bpf_defer *defer;
} __attribute__((aligned(8)));
1153+
/*
1154+
static void bpf_task_work_callback(struct callback_head *cb)
1155+
{
1156+
struct bpf_defer *defer = container_of(cb, struct bpf_defer, work);
1157+
bpf_callback_t callback_fn;
1158+
1159+
printk("Callback called %p\n", defer);
1160+
1161+
callback_fn = defer->callback_fn;
1162+
printk("Callback called is %p\n", callback_fn);
1163+
if (callback_fn) {
1164+
printk("Callback called 2 %p\n", callback_fn);
1165+
printk("Key size %d\n", defer->map->key_size);
1166+
callback_fn(0, 0, 0, 0, 0);
1167+
printk("Callback called 3 %p\n", callback_fn);
1168+
}
1169+
}
1170+
*/
1171+
__bpf_kfunc int bpf_task_work_schedule(void* callback__ign)
1172+
{
1173+
bpf_callback_t callback_fn;
1174+
//struct bpf_defer *defer;
1175+
/*
1176+
struct bpf_defer_work *defer_work = (struct bpf_defer_work *)task_work;
1177+
1178+
BTF_TYPE_EMIT(struct bpf_task_work);
1179+
1180+
defer = bpf_map_kmalloc_node(map, sizeof(struct bpf_defer), GFP_ATOMIC, map->numa_node);
1181+
if (!defer) {
1182+
return -ENOMEM;
1183+
}
1184+
//defer->map = map;
1185+
defer->work.func = bpf_task_work_callback;
1186+
defer->work.next = NULL;
1187+
defer->callback_fn = callback__ign;
1188+
printk("Callback is %p\n", callback__ign);
1189+
defer_work->defer = defer;
1190+
printk("Scheduling callback\n");
1191+
*/
1192+
callback_fn = callback__ign;
1193+
callback_fn(0,0,0,0,0);
1194+
//task_work_add(NULL, &defer->work, TWA_RESUME);
1195+
printk("Callback scheduled \n");
1196+
return 0;
1197+
}
1198+
11441199
static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
11451200

11461201
static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
@@ -3303,7 +3358,7 @@ BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
33033358
BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
33043359
BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
33053360
BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)
3306-
3361+
BTF_ID_FLAGS(func, bpf_task_work_schedule)
33073362
#ifdef CONFIG_CGROUPS
33083363
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
33093364
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)

kernel/bpf/verifier.c

Lines changed: 49 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -517,9 +517,11 @@ static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
517517
func_id == BPF_FUNC_user_ringbuf_drain;
518518
}
519519

520+
static bool is_kfunc_callback_calling_function(enum bpf_func_id func_id);
521+
520522
static bool is_async_callback_calling_function(enum bpf_func_id func_id)
521523
{
522-
return func_id == BPF_FUNC_timer_set_callback;
524+
return func_id == BPF_FUNC_timer_set_callback || is_kfunc_callback_calling_function(func_id);
523525
}
524526

525527
static bool is_callback_calling_function(enum bpf_func_id func_id)
@@ -10781,6 +10783,24 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
1078110783
return 0;
1078210784
}
1078310785

10786+
static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
10787+
struct bpf_func_state *caller,
10788+
struct bpf_func_state *callee,
10789+
int insn_idx)
10790+
{
10791+
10792+
callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
10793+
10794+
/* unused */
10795+
__mark_reg_not_init(env, &callee->regs[BPF_REG_2]);
10796+
__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
10797+
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
10798+
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
10799+
callee->in_callback_fn = true;
10800+
callee->callback_ret_range = retval_range(0, 1);
10801+
return 0;
10802+
}
10803+
1078410804
static bool is_rbtree_lock_required_kfunc(u32 btf_id);
1078510805

1078610806
/* Are we currently verifying the callback for a rbtree helper that must
@@ -12108,6 +12128,7 @@ enum special_kfunc_type {
1210812128
KF_bpf_res_spin_lock_irqsave,
1210912129
KF_bpf_res_spin_unlock_irqrestore,
1211012130
KF___bpf_trap,
12131+
KF_bpf_task_work_schedule,
1211112132
};
1211212133

1211312134
BTF_ID_LIST(special_kfunc_list)
@@ -12174,6 +12195,12 @@ BTF_ID(func, bpf_res_spin_unlock)
1217412195
BTF_ID(func, bpf_res_spin_lock_irqsave)
1217512196
BTF_ID(func, bpf_res_spin_unlock_irqrestore)
1217612197
BTF_ID(func, __bpf_trap)
12198+
BTF_ID(func, bpf_task_work_schedule)
12199+
12200+
/* True if the (k)func id identifies a kfunc that takes a callback.
 *
 * NOTE(review): the parameter is an enum bpf_func_id (helper id space) but
 * it is compared against a BTF id from special_kfunc_list — these are
 * different id namespaces, so a helper id could collide with the kfunc's
 * BTF id and yield a false positive. TODO confirm callers only pass kfunc
 * BTF ids here (see is_async_callback_calling_function()).
 */
static bool is_kfunc_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == special_kfunc_list[KF_bpf_task_work_schedule];
}
1217712204

1217812205
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
1217912206
{
@@ -12608,7 +12635,7 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
1260812635

1260912636
static bool is_async_callback_calling_kfunc(u32 btf_id)
1261012637
{
12611-
return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
12638+
return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] || btf_id == special_kfunc_list[KF_bpf_task_work_schedule];
1261212639
}
1261312640

1261412641
static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
@@ -12861,7 +12888,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
1286112888

1286212889
static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
1286312890
int insn_idx)
12864-
{
12891+
{ // todo check
1286512892
const char *func_name = meta->func_name, *ref_tname;
1286612893
const struct btf *btf = meta->btf;
1286712894
const struct btf_param *args;
@@ -13666,6 +13693,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
1366613693
if (err < 0)
1366713694
return err;
1366813695

13696+
if (meta.func_id == special_kfunc_list[KF_bpf_task_work_schedule]) {
13697+
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
13698+
set_task_work_schedule_callback_state);
13699+
if (err) {
13700+
verbose(env, "kfunc %s#%d failed callback verification\n",
13701+
func_name, meta.func_id);
13702+
return err;
13703+
}
13704+
}
13705+
1366913706
if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
1367013707
err = push_callback_call(env, insn, insn_idx, meta.subprogno,
1367113708
set_rbtree_add_callback_state);
@@ -16700,7 +16737,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
1670016737
return 0;
1670116738
}
1670216739

16703-
if (insn->src_reg == BPF_PSEUDO_FUNC) {
16740+
if (insn->src_reg == BPF_PSEUDO_FUNC) { // todo check
1670416741
struct bpf_prog_aux *aux = env->prog->aux;
1670516742
u32 subprogno = find_subprog(env,
1670616743
env->insn_idx + insn->imm + 1);
@@ -20161,7 +20198,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
2016120198
}
2016220199

2016320200
if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
20164-
aux = &env->insn_aux_data[i];
20201+
aux = &env->insn_aux_data[i]; // todo check
2016520202
aux->ptr_type = PTR_TO_FUNC;
2016620203
goto next_insn;
2016720204
}
@@ -21202,7 +21239,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
2120221239
* now populate all bpf_calls with correct addresses and
2120321240
* run last pass of JIT
2120421241
*/
21205-
for (i = 0; i < env->subprog_cnt; i++) {
21242+
for (i = 0; i < env->subprog_cnt; i++) { // Check what this is doing
2120621243
insn = func[i]->insnsi;
2120721244
for (j = 0; j < func[i]->len; j++, insn++) {
2120821245
if (bpf_pseudo_func(insn)) {
@@ -21458,7 +21495,9 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2145821495
insn->imm = BPF_CALL_IMM(desc->addr);
2145921496
if (insn->off)
2146021497
return 0;
21461-
if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
21498+
if (desc->func_id == special_kfunc_list[KF_bpf_task_work_schedule]) {
21499+
printk("Can patch program here\n");
21500+
} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
2146221501
desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
2146321502
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
2146421503
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
@@ -21604,8 +21643,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
2160421643
mark_subprog_exc_cb(env, env->exception_callback_subprog);
2160521644
}
2160621645

21607-
for (i = 0; i < insn_cnt;) {
21608-
if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
21646+
for (i = 0; i < insn_cnt;) { // When i == 25 insns[27] is our instruction, i ==27 is call
21647+
if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
2160921648
if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) ||
2161021649
(((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
2161121650
/* convert to 32-bit mov that clears upper 32-bit */
@@ -24109,7 +24148,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
2410924148
ret = convert_ctx_accesses(env);
2411024149

2411124150
if (ret == 0)
24112-
ret = do_misc_fixups(env);
24151+
ret = do_misc_fixups(env); // kkl overwrites are here !!!!!!
2411324152

2411424153
/* do 32-bit optimization after insn patching has done so those patched
2411524154
* insns could be handled correctly.

tools/include/uapi/linux/bpf.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7375,6 +7375,10 @@ struct bpf_timer {
73757375
__u64 __opaque[2];
73767376
} __attribute__((aligned(8)));
73777377

7378+
/* Opaque handle for scheduling BPF callbacks via task_work; mirror of the
 * kernel uapi definition — contents are kernel-managed.
 */
struct bpf_task_work {
	__u64 __opaque[2];
} __attribute__((aligned(8)));
7381+
73787382
struct bpf_wq {
73797383
__u64 __opaque[2];
73807384
} __attribute__((aligned(8)));
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
3+
#include <test_progs.h>
4+
#include <string.h>
5+
#include <stdio.h>
6+
#include "task_work.skel.h"
7+
8+
static void test_task_work_run(void)
9+
{
10+
struct task_work *skel;
11+
struct bpf_program *prog;
12+
//struct bpf_link *link;
13+
char data[5000];
14+
int err, prog_fd;
15+
//int err;
16+
LIBBPF_OPTS(bpf_test_run_opts, opts,
17+
.data_in = &data,
18+
.data_size_in = sizeof(data),
19+
.repeat = 1,
20+
);
21+
22+
skel = task_work__open();
23+
if (!ASSERT_OK_PTR(skel, "task_work__open"))
24+
return;
25+
26+
err = task_work__load(skel);
27+
if (!ASSERT_OK(err, "task_work__load"))
28+
goto cleanup;
29+
30+
prog = bpf_object__find_program_by_name(skel->obj, "test_task_work");
31+
prog_fd = bpf_program__fd(prog);
32+
fprintf(stderr, "Running a program \n");
33+
err = bpf_prog_test_run_opts(prog_fd, &opts);
34+
sleep(20);
35+
if (!ASSERT_OK(err, "test_run"))
36+
goto cleanup;
37+
38+
fprintf(stderr, "Gooing to sleep \n");
39+
sleep(20);
40+
cleanup:
41+
task_work__destroy(skel);
42+
}
43+
44+
/* Entry point for the task_work selftest; dispatches each subtest. */
void test_task_work(void)
{
	if (test__start_subtest("test_task_work_run"))
		test_task_work_run();
}
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/* Copyright (c) 2022 Facebook */
3+
4+
#include <vmlinux.h>
5+
#include <string.h>
6+
#include <stdbool.h>
7+
#include <bpf/bpf_helpers.h>
8+
#include <bpf/bpf_tracing.h>
9+
#include "bpf_misc.h"
10+
#include "errno.h"
11+
12+
char _license[] SEC("license") = "GPL";
13+
14+
/* Callback passed to bpf_task_work_schedule(); logs its argument.
 * Uses %llu: the argument is __u64, so the previous %u specifier
 * mismatched the argument type.
 */
static __u64 test_cb(__u64 p)
{
	bpf_printk("Hello map %llu\n", p);
	return 0;
}
19+
20+
volatile int cnt = 0;
21+
22+
/* XDP program that exercises the task-work kfunc: schedules test_cb and
 * lets the packet through unchanged.
 */
SEC("xdp")
int test_task_work(struct xdp_md *xdp)
{
	bpf_task_work_schedule(test_cb);
	return XDP_PASS;
}

0 commit comments

Comments
 (0)