@@ -517,9 +517,11 @@ static bool is_sync_callback_calling_function(enum bpf_func_id func_id)
 	       func_id == BPF_FUNC_user_ringbuf_drain;
 }
 
+static bool is_kfunc_callback_calling_function(enum bpf_func_id func_id);
+
 static bool is_async_callback_calling_function(enum bpf_func_id func_id)
 {
-	return func_id == BPF_FUNC_timer_set_callback;
+	return func_id == BPF_FUNC_timer_set_callback || is_kfunc_callback_calling_function(func_id);
 }
 
 static bool is_callback_calling_function(enum bpf_func_id func_id)
@@ -10781,6 +10783,24 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
 	return 0;
 }
 
+static int set_task_work_schedule_callback_state(struct bpf_verifier_env *env,
+						 struct bpf_func_state *caller,
+						 struct bpf_func_state *callee,
+						 int insn_idx)
+{
+
+	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+	/* unused */
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_2]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	callee->in_callback_fn = true;
+	callee->callback_ret_range = retval_range(0, 1);
+	return 0;
+}
+
 static bool is_rbtree_lock_required_kfunc(u32 btf_id);
 
 /* Are we currently verifying the callback for a rbtree helper that must
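For reference, the new callback-state helper above forwards the caller's R1 into the callee and constrains the callback's return value to [0, 1]. A BPF-side callback that would satisfy those constraints might look like the sketch below; the callback name and the argument's type are assumptions, since the kfunc's BPF-facing signature is not part of this diff.

	/* Hypothetical callback for bpf_task_work_schedule(): the verifier state
	 * set up above passes the kfunc's first argument through as R1 and
	 * requires the return value to stay within retval_range(0, 1).
	 */
	static int task_work_cb(void *ctx /* assumed type */)
	{
		/* do the deferred, task-context work here */
		return 0; /* must be 0 or 1 */
	}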
@@ -12108,6 +12128,7 @@ enum special_kfunc_type {
 	KF_bpf_res_spin_lock_irqsave,
 	KF_bpf_res_spin_unlock_irqrestore,
 	KF___bpf_trap,
+	KF_bpf_task_work_schedule,
 };
 
 BTF_ID_LIST(special_kfunc_list)
@@ -12174,6 +12195,12 @@ BTF_ID(func, bpf_res_spin_unlock)
 BTF_ID(func, bpf_res_spin_lock_irqsave)
 BTF_ID(func, bpf_res_spin_unlock_irqrestore)
 BTF_ID(func, __bpf_trap)
+BTF_ID(func, bpf_task_work_schedule)
+
+static bool is_kfunc_callback_calling_function(enum bpf_func_id func_id)
+{
+	return func_id == special_kfunc_list[KF_bpf_task_work_schedule];
+}
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -12608,7 +12635,7 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id)
 
 static bool is_async_callback_calling_kfunc(u32 btf_id)
 {
-	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl];
+	return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl] || btf_id == special_kfunc_list[KF_bpf_task_work_schedule];
 }
 
 static bool is_bpf_throw_kfunc(struct bpf_insn *insn)
@@ -12861,7 +12888,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
 
 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
 			    int insn_idx)
-{
+{ // todo check
 	const char *func_name = meta->func_name, *ref_tname;
 	const struct btf *btf = meta->btf;
 	const struct btf_param *args;
@@ -13666,6 +13693,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	if (err < 0)
 		return err;
 
+	if (meta.func_id == special_kfunc_list[KF_bpf_task_work_schedule]) {
+		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
+					 set_task_work_schedule_callback_state);
+		if (err) {
+			verbose(env, "kfunc %s#%d failed callback verification\n",
+				func_name, meta.func_id);
+			return err;
+		}
+	}
+
 	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
 					 set_rbtree_add_callback_state);
@@ -16700,7 +16737,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return 0;
 	}
 
-	if (insn->src_reg == BPF_PSEUDO_FUNC) {
+	if (insn->src_reg == BPF_PSEUDO_FUNC) { // todo check
 		struct bpf_prog_aux *aux = env->prog->aux;
 		u32 subprogno = find_subprog(env,
 					     env->insn_idx + insn->imm + 1);
@@ -20161,7 +20198,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 		}
 
 		if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
-			aux = &env->insn_aux_data[i];
+			aux = &env->insn_aux_data[i]; // todo check
 			aux->ptr_type = PTR_TO_FUNC;
 			goto next_insn;
 		}
@@ -21202,7 +21239,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	 * now populate all bpf_calls with correct addresses and
 	 * run last pass of JIT
 	 */
-	for (i = 0; i < env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) { // Check what this is doing
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
 			if (bpf_pseudo_func(insn)) {
@@ -21458,7 +21495,9 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	insn->imm = BPF_CALL_IMM(desc->addr);
 	if (insn->off)
 		return 0;
-	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
+	if (desc->func_id == special_kfunc_list[KF_bpf_task_work_schedule]) {
+		printk("Can patch program here\n");
+	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] ||
 	    desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
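The printk() above is only a placeholder marking where the call could be rewritten. If bpf_task_work_schedule eventually needs a hidden argument materialized at fixup time, one possible shape is the sketch below, which follows the KF_bpf_obj_new_impl pattern visible in this same hunk; the insn_buf/cnt plumbing comes from the surrounding fixup_kfunc_call() parameters (not shown in the hunk), and some_aux_ptr and the choice of BPF_REG_5 are purely hypothetical.

	/* Sketch only: patch the kfunc call site by prepending a 64-bit
	 * immediate load into an otherwise unused argument register, then
	 * re-emit the original call, as the obj_new fixup does.
	 */
	if (desc->func_id == special_kfunc_list[KF_bpf_task_work_schedule]) {
		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_5, (long)some_aux_ptr) };

		insn_buf[0] = addr[0];
		insn_buf[1] = addr[1];
		insn_buf[2] = *insn;
		*cnt = 3;
	}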
@@ -21604,8 +21643,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		mark_subprog_exc_cb(env, env->exception_callback_subprog);
 	}
 
-	for (i = 0; i < insn_cnt;) {
-		if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
+	for (i = 0; i < insn_cnt;) { // When i == 25 insns[27] is our instruction, i == 27 is call
+		if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
 			if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) ||
 			    (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
 				/* convert to 32-bit mov that clears upper 32-bit */
@@ -24109,7 +24148,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 		ret = convert_ctx_accesses(env);
 
 	if (ret == 0)
-		ret = do_misc_fixups(env);
+		ret = do_misc_fixups(env); // kkl overwrites are here !!!!!!
 
 	/* do 32-bit optimization after insn patching has done so those patched
 	 * insns could be handled correctly.