diff --git a/bpf/process/bpf_generic_kprobe.c b/bpf/process/bpf_generic_kprobe.c
index e958f79bacb..bf1ddb8d9d7 100644
--- a/bpf/process/bpf_generic_kprobe.c
+++ b/bpf/process/bpf_generic_kprobe.c
@@ -12,19 +12,10 @@
 #include "retprobe_map.h"
 #include "types/operations.h"
 #include "types/basic.h"
-#include "generic_calls.h"
-#include "pfilter.h"
 #include "policy_filter.h"
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} process_call_heap SEC(".maps");
-
 int generic_kprobe_setup_event(void *ctx);
 int generic_kprobe_process_event(void *ctx);
 int generic_kprobe_process_filter(void *ctx);
@@ -48,51 +39,8 @@ struct {
 	},
 };
 
-struct {
-	__uint(type, BPF_MAP_TYPE_HASH);
-	__uint(max_entries, 1); // will be resized by agent when needed
-	__type(key, __u64);
-	__type(value, __s32);
-} override_tasks SEC(".maps");
-
-#ifdef __LARGE_BPF_PROG
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_data);
-} data_heap SEC(".maps");
-#define data_heap_ptr &data_heap
-#else
-#define data_heap_ptr 0
-#endif
-
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-/* Arrays of size 1 will be rewritten to direct loads in verifier */
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
-static struct generic_maps maps = {
-	.heap = (struct bpf_map_def *)&process_call_heap,
-	.calls = (struct bpf_map_def *)&kprobe_calls,
-	.config = (struct bpf_map_def *)&config_map,
-	.filter = (struct bpf_map_def *)&filter_map,
-	.override = (struct bpf_map_def *)&override_tasks,
-};
+#include "generic_maps.h"
+#include "generic_calls.h"
 
 #ifdef __MULTI_KPROBE
 #define MAIN "kprobe.multi/generic_kprobe"
@@ -128,27 +76,19 @@ static struct generic_maps maps = {
 __attribute__((section((MAIN)), used)) int
 generic_kprobe_event(struct pt_regs *ctx)
 {
-	return generic_start_process_filter(ctx, &maps);
+	return generic_start_process_filter(ctx, (struct bpf_map_def *)&kprobe_calls);
 }
 
 __attribute__((section("kprobe"), used)) int
 generic_kprobe_setup_event(void *ctx)
 {
-	return generic_process_event_and_setup(
-		ctx, (struct bpf_map_def *)&process_call_heap,
-		(struct bpf_map_def *)&kprobe_calls,
-		(struct bpf_map_def *)&config_map,
-		(struct bpf_map_def *)data_heap_ptr);
+	return generic_process_event_and_setup(ctx, (struct bpf_map_def *)&kprobe_calls);
 }
 
 __attribute__((section("kprobe"), used)) int
 generic_kprobe_process_event(void *ctx)
 {
-	return generic_process_event(ctx,
-				     (struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&kprobe_calls,
-				     (struct bpf_map_def *)&config_map,
-				     (struct bpf_map_def *)data_heap_ptr);
+	return generic_process_event(ctx, (struct bpf_map_def *)&kprobe_calls);
 }
 
 __attribute__((section("kprobe"), used)) int
@@ -156,8 +96,7 @@ generic_kprobe_process_filter(void *ctx)
 {
 	int ret;
 
-	ret = generic_process_filter((struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&filter_map);
+	ret = generic_process_filter();
 	if (ret == PFILTER_CONTINUE)
 		tail_call(ctx, &kprobe_calls, TAIL_CALL_FILTER);
 	else if (ret == PFILTER_ACCEPT)
@@ -171,24 +110,20 @@ generic_kprobe_process_filter(void *ctx)
 __attribute__((section("kprobe"), used)) int
 generic_kprobe_filter_arg(void *ctx)
 {
-	return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
-			       (struct bpf_map_def *)&filter_map,
-			       (struct bpf_map_def *)&kprobe_calls,
-			       (struct bpf_map_def *)&config_map,
-			       true);
+	return generic_filter_arg(ctx, (struct bpf_map_def *)&kprobe_calls, true);
 }
 
 __attribute__((section("kprobe"), used)) int
 generic_kprobe_actions(void *ctx)
 {
-	generic_actions(ctx, &maps);
+	generic_actions(ctx, (struct bpf_map_def *)&kprobe_calls);
 	return 0;
 }
 
 __attribute__((section("kprobe"), used)) int
 generic_kprobe_output(void *ctx)
 {
-	return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_KPROBE);
+	return generic_output(ctx, MSG_OP_GENERIC_KPROBE);
 }
 
 __attribute__((section(OVERRIDE), used)) int
diff --git a/bpf/process/bpf_generic_lsm_core.c b/bpf/process/bpf_generic_lsm_core.c
index 871803a1403..9f8ffcf049e 100644
--- a/bpf/process/bpf_generic_lsm_core.c
+++ b/bpf/process/bpf_generic_lsm_core.c
@@ -15,17 +15,9 @@
 #include "retprobe_map.h"
 #include "types/operations.h"
 #include "types/basic.h"
-#include "generic_calls.h"
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} process_call_heap SEC(".maps");
-
 int generic_lsm_setup_event(void *ctx);
 int generic_lsm_process_event(void *ctx);
 int generic_lsm_process_filter(void *ctx);
@@ -47,78 +39,27 @@ struct {
 	},
 };
 
-struct {
-	__uint(type, BPF_MAP_TYPE_HASH);
-	__uint(max_entries, 32768);
-	__type(key, __u64);
-	__type(value, __s32);
-} override_tasks SEC(".maps");
-
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-/* Arrays of size 1 will be rewritten to direct loads in verifier */
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
-#ifdef __LARGE_BPF_PROG
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_data);
-} data_heap SEC(".maps");
-#define data_heap_ptr &data_heap
-#else
-#define data_heap_ptr 0
-#endif
-
-static struct generic_maps maps = {
-	.heap = (struct bpf_map_def *)&process_call_heap,
-	.calls = (struct bpf_map_def *)&lsm_calls,
-	.config = (struct bpf_map_def *)&config_map,
-	.filter = (struct bpf_map_def *)&filter_map,
-	.override = (struct bpf_map_def *)&override_tasks,
-};
+#include "generic_maps.h"
+#include "generic_calls.h"
 
 #define MAIN "lsm/generic_lsm_core"
 
 __attribute__((section((MAIN)), used)) int
 generic_lsm_event(struct pt_regs *ctx)
 {
-	return generic_start_process_filter(ctx, &maps);
+	return generic_start_process_filter(ctx, (struct bpf_map_def *)&lsm_calls);
 }
 
 __attribute__((section("lsm"), used)) int
 generic_lsm_setup_event(void *ctx)
 {
-	return generic_process_event_and_setup(
-		ctx, (struct bpf_map_def *)&process_call_heap,
-		(struct bpf_map_def *)&lsm_calls,
-		(struct bpf_map_def *)&config_map,
-		(struct bpf_map_def *)data_heap_ptr);
+	return generic_process_event_and_setup(ctx, (struct bpf_map_def *)&lsm_calls);
 }
 
 __attribute__((section("lsm"), used)) int
 generic_lsm_process_event(void *ctx)
 {
-	return generic_process_event(ctx,
-				     (struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&lsm_calls,
-				     (struct bpf_map_def *)&config_map,
-				     (struct bpf_map_def *)data_heap_ptr);
+	return generic_process_event(ctx, (struct bpf_map_def *)&lsm_calls);
 }
 
 __attribute__((section("lsm"), used)) int
@@ -126,8 +67,7 @@ generic_lsm_process_filter(void *ctx)
 {
 	int ret;
 
-	ret = generic_process_filter((struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&filter_map);
+	ret = generic_process_filter();
 	if (ret == PFILTER_CONTINUE)
 		tail_call(ctx, &lsm_calls, TAIL_CALL_FILTER);
 	else if (ret == PFILTER_ACCEPT)
@@ -138,17 +78,13 @@ generic_lsm_process_filter(void *ctx)
 __attribute__((section("lsm"), used)) int
 generic_lsm_filter_arg(void *ctx)
 {
-	return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
-			       (struct bpf_map_def *)&filter_map,
-			       (struct bpf_map_def *)&lsm_calls,
-			       (struct bpf_map_def *)&config_map,
-			       true);
+	return generic_filter_arg(ctx, (struct bpf_map_def *)&lsm_calls, true);
 }
 
 __attribute__((section("lsm"), used)) int
 generic_lsm_actions(void *ctx)
 {
-	bool postit = generic_actions(ctx, &maps);
+	bool postit = generic_actions(ctx, (struct bpf_map_def *)&lsm_calls);
 
 	struct msg_generic_kprobe *e;
 	int zero = 0;
diff --git a/bpf/process/bpf_generic_lsm_ima_bprm.c b/bpf/process/bpf_generic_lsm_ima_bprm.c
index 5697a5a2a83..f802cdbb9d0 100644
--- a/bpf/process/bpf_generic_lsm_ima_bprm.c
+++ b/bpf/process/bpf_generic_lsm_ima_bprm.c
@@ -12,27 +12,10 @@
 #include "bpf_lsm_ima.h"
 #include "retprobe_map.h"
 #include "types/basic.h"
+#include "generic_maps.h"
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
 __attribute__((section("lsm.s/generic_lsm_ima_bprm"), used)) int
 BPF_PROG(ima_bprm, struct linux_binprm *bprm)
 {
diff --git a/bpf/process/bpf_generic_lsm_ima_file.c b/bpf/process/bpf_generic_lsm_ima_file.c
index ed98a6e9f79..c0d52a00236 100644
--- a/bpf/process/bpf_generic_lsm_ima_file.c
+++ b/bpf/process/bpf_generic_lsm_ima_file.c
@@ -12,27 +12,10 @@
 #include "bpf_lsm_ima.h"
 #include "retprobe_map.h"
 #include "types/basic.h"
+#include "generic_maps.h"
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
 __attribute__((section("lsm.s/generic_lsm_ima_file"), used)) int
 BPF_PROG(ima_file, struct file *file)
 {
diff --git a/bpf/process/bpf_generic_lsm_output.c b/bpf/process/bpf_generic_lsm_output.c
index 886ae61b7d3..9f4b144fb3a 100644
--- a/bpf/process/bpf_generic_lsm_output.c
+++ b/bpf/process/bpf_generic_lsm_output.c
@@ -14,41 +14,11 @@
 #include "bpf_task.h"
 #include "retprobe_map.h"
 #include "types/basic.h"
 
-char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
-
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} process_call_heap SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_HASH);
-	__uint(max_entries, 32768);
-	__type(key, __u64);
-	__type(value, __s32);
-} override_tasks SEC(".maps");
+#include "generic_maps.h"
+#include "generic_calls.h"
 
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-/* Arrays of size 1 will be rewritten to direct loads in verifier */
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
+char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
 __attribute__((section("lsm/generic_lsm_output"), used)) int
 generic_lsm_output(void *ctx)
@@ -75,6 +46,6 @@ generic_lsm_output(void *ctx)
 	}
 #endif
 	if (e->lsm.post)
-		generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_LSM);
+		generic_output(ctx, MSG_OP_GENERIC_LSM);
 	return try_override(ctx, (struct bpf_map_def *)&override_tasks);
 }
diff --git a/bpf/process/bpf_generic_retkprobe.c b/bpf/process/bpf_generic_retkprobe.c
index 4e99dcade70..160639a9816 100644
--- a/bpf/process/bpf_generic_retkprobe.c
+++ b/bpf/process/bpf_generic_retkprobe.c
@@ -17,13 +17,6 @@
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} process_call_heap SEC(".maps");
-
 int generic_retkprobe_filter_arg(struct pt_regs *ctx);
 int generic_retkprobe_actions(struct pt_regs *ctx);
 int generic_retkprobe_output(struct pt_regs *ctx);
@@ -41,35 +34,8 @@ struct {
 	},
 };
 
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
-#ifdef __LARGE_BPF_PROG
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_data);
-} data_heap SEC(".maps");
-#define data_heap_ptr &data_heap
-#else
-#define data_heap_ptr 0
-#endif
+#include "generic_maps.h"
+#include "generic_calls.h"
 
 #ifdef __MULTI_KPROBE
 #define MAIN "kprobe.multi/generic_retkprobe"
@@ -77,129 +43,27 @@ struct {
 #define MAIN "kprobe/generic_retkprobe"
 #endif
 
-static struct generic_maps maps = {
-	.heap = (struct bpf_map_def *)&process_call_heap,
-	.calls = (struct bpf_map_def *)&retkprobe_calls,
-	.filter = (struct bpf_map_def *)&filter_map,
-};
-
 __attribute__((section((MAIN)), used)) int
 BPF_KRETPROBE(generic_retkprobe_event, unsigned long ret)
 {
-	struct execve_map_value *enter;
-	struct msg_generic_kprobe *e;
-	struct retprobe_info info;
-	struct event_config *config;
-	bool walker = false;
-	int zero = 0;
-	__u32 ppid;
-	long size = 0;
-	long ty_arg, do_copy;
-	__u64 pid_tgid;
-
-	e = map_lookup_elem(&process_call_heap, &zero);
-	if (!e)
-		return 0;
-
-	e->idx = get_index(ctx);
-
-	config = map_lookup_elem(&config_map, &e->idx);
-	if (!config)
-		return 0;
-
-	e->func_id = config->func_id;
-	e->retprobe_id = retprobe_map_get_key(ctx);
-	pid_tgid = get_current_pid_tgid();
-	e->tid = (__u32)pid_tgid;
-
-	if (!retprobe_map_get(e->func_id, e->retprobe_id, &info))
-		return 0;
-
-	*(unsigned long *)e->args = info.ktime_enter;
-	size += sizeof(info.ktime_enter);
-
-	ty_arg = config->argreturn;
-	do_copy = config->argreturncopy;
-	if (ty_arg) {
-		size += read_call_arg(ctx, e, 0, ty_arg, size, ret, 0, (struct bpf_map_def *)data_heap_ptr);
-#ifdef __LARGE_BPF_PROG
-		struct socket_owner owner;
-		switch (config->argreturnaction) {
-		case ACTION_TRACKSOCK:
-			owner.pid = e->current.pid;
-			owner.tid = e->tid;
-			owner.ktime = e->current.ktime;
-			map_update_elem(&socktrack_map, &ret, &owner, BPF_ANY);
-			break;
-		case ACTION_UNTRACKSOCK:
-			map_delete_elem(&socktrack_map, &ret);
-			break;
-		}
-#endif
-	}
-
-	/*
-	 * 0x1000 should be maximum argument length, so masking
-	 * with 0x1fff is safe and verifier will be happy.
-	 */
-	asm volatile("%[size] &= 0x1fff;\n"
-		     : [size] "+r"(size));
-
-	switch (do_copy) {
-	case char_buf:
-		size += __copy_char_buf(ctx, size, info.ptr, ret, false, e, (struct bpf_map_def *)data_heap_ptr);
-		break;
-	case char_iovec:
-		size += __copy_char_iovec(size, info.ptr, info.cnt, ret, e);
-	default:
-		break;
-	}
-
-	/* Complete message header and send */
-	enter = event_find_curr(&ppid, &walker);
-
-	e->common.op = MSG_OP_GENERIC_KPROBE;
-	e->common.flags |= MSG_COMMON_FLAG_RETURN;
-	e->common.pad[0] = 0;
-	e->common.pad[1] = 0;
-	e->common.size = size;
-	e->common.ktime = ktime_get_ns();
-
-	if (enter) {
-		e->current.pid = enter->key.pid;
-		e->current.ktime = enter->key.ktime;
-	}
-	e->current.pad[0] = 0;
-	e->current.pad[1] = 0;
-	e->current.pad[2] = 0;
-	e->current.pad[3] = 0;
-
-	e->func_id = config->func_id;
-	e->common.size = size;
-
-	tail_call(ctx, &retkprobe_calls, TAIL_CALL_ARGS);
-	return 1;
+	return generic_retkprobe(ctx, (struct bpf_map_def *)&retkprobe_calls, ret);
 }
 
 __attribute__((section("kprobe"), used)) int
 BPF_KRETPROBE(generic_retkprobe_filter_arg)
 {
-	return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
-			       (struct bpf_map_def *)&filter_map,
-			       (struct bpf_map_def *)&retkprobe_calls,
-			       (struct bpf_map_def *)&config_map,
-			       false);
+	return generic_filter_arg(ctx, (struct bpf_map_def *)&retkprobe_calls, false);
 }
 
 __attribute__((section("kprobe"), used)) int
 BPF_KRETPROBE(generic_retkprobe_actions)
 {
-	generic_actions(ctx, &maps);
+	generic_actions(ctx, (struct bpf_map_def *)&retkprobe_calls);
 	return 0;
 }
 
 __attribute__((section("kprobe"), used)) int
 BPF_KRETPROBE(generic_retkprobe_output)
 {
-	return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_KPROBE);
+	return generic_output(ctx, MSG_OP_GENERIC_KPROBE);
 }
diff --git a/bpf/process/bpf_generic_tracepoint.c b/bpf/process/bpf_generic_tracepoint.c
index f8b8bf2eab9..a981998f9c8 100644
--- a/bpf/process/bpf_generic_tracepoint.c
+++ b/bpf/process/bpf_generic_tracepoint.c
@@ -13,8 +13,6 @@
 #include "retprobe_map.h"
 #include "types/operations.h"
 #include "types/basic.h"
-#include "generic_calls.h"
-#include "pfilter.h"
 #include "policy_filter.h"
 #include "syscall64.h"
 
@@ -39,37 +37,8 @@ struct {
 	},
 };
 
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} tp_heap SEC(".maps");
-
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-/* Arrays of size 1 will be rewritten to direct loads in verifier */
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
-static struct generic_maps maps = {
-	.heap = (struct bpf_map_def *)&tp_heap,
-	.calls = (struct bpf_map_def *)&tp_calls,
-	.filter = (struct bpf_map_def *)&filter_map,
-};
+#include "generic_maps.h"
+#include "generic_calls.h"
 
 struct generic_tracepoint_event_arg {
 	/* common header */
@@ -165,7 +134,7 @@ generic_tracepoint_event(struct generic_tracepoint_event_arg *ctx)
 	struct event_config *config;
 	int zero = 0, i;
 
-	msg = map_lookup_elem(&tp_heap, &zero);
+	msg = map_lookup_elem(&process_call_heap, &zero);
 	if (!msg)
 		return 0;
 
@@ -250,9 +219,7 @@ generic_tracepoint_event(struct generic_tracepoint_event_arg *ctx)
 __attribute__((section("tracepoint"), used)) int
 generic_tracepoint_process_event(void *ctx)
 {
-	return generic_process_event(ctx, (struct bpf_map_def *)&tp_heap,
-				     (struct bpf_map_def *)&tp_calls,
-				     (struct bpf_map_def *)&config_map, 0);
+	return generic_process_event(ctx, (struct bpf_map_def *)&tp_calls);
 }
 
 __attribute__((section("tracepoint"), used)) int
@@ -260,8 +227,7 @@ generic_tracepoint_filter(void *ctx)
 {
 	int ret;
 
-	ret = generic_process_filter((struct bpf_map_def *)&tp_heap,
-				     (struct bpf_map_def *)&filter_map);
+	ret = generic_process_filter();
 	if (ret == PFILTER_CONTINUE)
 		tail_call(ctx, &tp_calls, TAIL_CALL_FILTER);
 	else if (ret == PFILTER_ACCEPT)
@@ -275,24 +241,20 @@ generic_tracepoint_filter(void *ctx)
 __attribute__((section("tracepoint"), used)) int
 generic_tracepoint_arg(void *ctx)
 {
-	return filter_read_arg(ctx, (struct bpf_map_def *)&tp_heap,
-			       (struct bpf_map_def *)&filter_map,
-			       (struct bpf_map_def *)&tp_calls,
-			       (struct bpf_map_def *)&config_map,
-			       true);
+	return generic_filter_arg(ctx, (struct bpf_map_def *)&tp_calls, true);
 }
 
 __attribute__((section("tracepoint"), used)) int
 generic_tracepoint_actions(void *ctx)
 {
-	generic_actions(ctx, &maps);
+	generic_actions(ctx, (struct bpf_map_def *)&tp_calls);
 	return 0;
 }
 
 __attribute__((section("tracepoint"), used)) int
 generic_tracepoint_output(void *ctx)
 {
-	return generic_output(ctx, (struct bpf_map_def *)&tp_heap, MSG_OP_GENERIC_TRACEPOINT);
+	return generic_output(ctx, MSG_OP_GENERIC_TRACEPOINT);
 }
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
diff --git a/bpf/process/bpf_generic_uprobe.c b/bpf/process/bpf_generic_uprobe.c
index e6991f052bd..0a9d85bb280 100644
--- a/bpf/process/bpf_generic_uprobe.c
+++ b/bpf/process/bpf_generic_uprobe.c
@@ -12,18 +12,9 @@
 #include "retprobe_map.h"
 #include "types/operations.h"
 #include "types/basic.h"
-#include "generic_calls.h"
-#include "pfilter.h"
 
 char _license[] __attribute__((section("license"), used)) = "Dual BSD/GPL";
 
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct msg_generic_kprobe);
-} process_call_heap SEC(".maps");
-
 int generic_uprobe_setup_event(void *ctx);
 int generic_uprobe_process_event(void *ctx);
 int generic_uprobe_process_filter(void *ctx);
@@ -47,31 +38,8 @@ struct {
 	},
 };
 
-struct filter_map_value {
-	unsigned char buf[FILTER_SIZE];
-};
-
-/* Arrays of size 1 will be rewritten to direct loads in verifier */
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, int);
-	__type(value, struct filter_map_value);
-} filter_map SEC(".maps");
-
-struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
-	__uint(max_entries, 1);
-	__type(key, __u32);
-	__type(value, struct event_config);
-} config_map SEC(".maps");
-
-static struct generic_maps maps = {
-	.heap = (struct bpf_map_def *)&process_call_heap,
-	.calls = (struct bpf_map_def *)&uprobe_calls,
-	.config = (struct bpf_map_def *)&config_map,
-	.filter = (struct bpf_map_def *)&filter_map,
-};
+#include "generic_maps.h"
+#include "generic_calls.h"
 
 #ifdef __MULTI_KPROBE
 #define MAIN "uprobe.multi/generic_uprobe"
@@ -82,25 +50,19 @@ static struct generic_maps maps = {
 __attribute__((section((MAIN)), used)) int
 generic_uprobe_event(struct pt_regs *ctx)
 {
-	return generic_start_process_filter(ctx, &maps);
+	return generic_start_process_filter(ctx, (struct bpf_map_def *)&uprobe_calls);
 }
 
 __attribute__((section("uprobe"), used)) int
 generic_uprobe_setup_event(void *ctx)
 {
-	return generic_process_event_and_setup(
-		ctx, (struct bpf_map_def *)&process_call_heap,
-		(struct bpf_map_def *)&uprobe_calls,
-		(struct bpf_map_def *)&config_map, 0);
+	return generic_process_event_and_setup(ctx, (struct bpf_map_def *)&uprobe_calls);
 }
 
 __attribute__((section("uprobe"), used)) int
 generic_uprobe_process_event(void *ctx)
 {
-	return generic_process_event(ctx,
-				     (struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&uprobe_calls,
-				     (struct bpf_map_def *)&config_map, 0);
+	return generic_process_event(ctx, (struct bpf_map_def *)&uprobe_calls);
 }
 
 __attribute__((section("uprobe"), used)) int
@@ -108,8 +70,7 @@ generic_uprobe_process_filter(void *ctx)
 {
 	int ret;
 
-	ret = generic_process_filter((struct bpf_map_def *)&process_call_heap,
-				     (struct bpf_map_def *)&filter_map);
+	ret = generic_process_filter();
 	if (ret == PFILTER_CONTINUE)
 		tail_call(ctx, &uprobe_calls, TAIL_CALL_FILTER);
 	else if (ret == PFILTER_ACCEPT)
@@ -123,22 +84,18 @@ generic_uprobe_process_filter(void *ctx)
 __attribute__((section("uprobe"), used)) int
 generic_uprobe_filter_arg(void *ctx)
 {
-	return filter_read_arg(ctx, (struct bpf_map_def *)&process_call_heap,
-			       (struct bpf_map_def *)&filter_map,
-			       (struct bpf_map_def *)&uprobe_calls,
-			       (struct bpf_map_def *)&config_map,
-			       true);
+	return generic_filter_arg(ctx, (struct bpf_map_def *)&uprobe_calls, true);
 }
 
 __attribute__((section("uprobe"), used)) int
 generic_uprobe_actions(void *ctx)
 {
-	generic_actions(ctx, &maps);
+	generic_actions(ctx, (struct bpf_map_def *)&uprobe_calls);
 	return 0;
 }
 
 __attribute__((section("uprobe"), used)) int
 generic_uprobe_output(void *ctx)
 {
-	return generic_output(ctx, (struct bpf_map_def *)&process_call_heap, MSG_OP_GENERIC_UPROBE);
+	return generic_output(ctx, MSG_OP_GENERIC_UPROBE);
 }
diff --git a/bpf/process/generic_calls.h b/bpf/process/generic_calls.h
index 6bbf2877561..5f0c5042abd 100644
--- a/bpf/process/generic_calls.h
+++ b/bpf/process/generic_calls.h
@@ -13,20 +13,20 @@
 #define MAX_TOTAL 9000
 
 FUNC_INLINE int
-generic_start_process_filter(void *ctx, struct generic_maps *maps)
+generic_start_process_filter(void *ctx, struct bpf_map_def *calls)
 {
 	struct msg_generic_kprobe *msg;
 	struct event_config *config;
 	struct task_struct *task;
 	int i, zero = 0;
 
-	msg = map_lookup_elem(maps->heap, &zero);
+	msg = map_lookup_elem(&process_call_heap, &zero);
 	if (!msg)
 		return 0;
 
 	/* setup index, check policy filter, and setup function id */
 	msg->idx = get_index(ctx);
-	config = map_lookup_elem(maps->config, &msg->idx);
+	config = map_lookup_elem(&config_map, &msg->idx);
 	if (!config)
 		return 0;
 	if (!policy_filter_check(config->policy_id))
@@ -58,14 +58,12 @@ generic_start_process_filter(void *ctx, struct generic_maps *maps)
 	msg->lsm.post = false;
 
 	/* Tail call into filters. */
-	tail_call(ctx, maps->calls, TAIL_CALL_FILTER);
+	tail_call(ctx, calls, TAIL_CALL_FILTER);
 	return 0;
 }
 
 FUNC_INLINE int
-generic_process_event(void *ctx, struct bpf_map_def *heap_map,
-		      struct bpf_map_def *tailcals, struct bpf_map_def *config_map,
-		      struct bpf_map_def *data_heap)
+generic_process_event(void *ctx, struct bpf_map_def *tailcals)
 {
 	struct msg_generic_kprobe *e;
 	struct event_config *config;
@@ -73,11 +71,11 @@ generic_process_event(void *ctx, struct bpf_map_def *heap_map,
 	unsigned long a;
 	long ty, total;
 
-	e = map_lookup_elem(heap_map, &zero);
+	e = map_lookup_elem(&process_call_heap, &zero);
 	if (!e)
 		return 0;
 
-	config = map_lookup_elem(config_map, &e->idx);
+	config = map_lookup_elem(&config_map, &e->idx);
 	if (!config)
 		return 0;
 
@@ -99,7 +97,7 @@ generic_process_event(void *ctx, struct bpf_map_def *heap_map,
 		asm volatile("%[am] &= 0xffff;\n"
 			     : [am] "+r"(am));
 
-		errv = read_call_arg(ctx, e, index, ty, total, a, am, data_heap);
+		errv = read_call_arg(ctx, e, index, ty, total, a, am, data_heap_ptr);
 		if (errv > 0)
 			total += errv;
 		/* Follow filter lookup failed so lets abort the event.
@@ -149,11 +147,7 @@ generic_process_init(struct msg_generic_kprobe *e, u8 op, struct event_config *c
 }
 
 FUNC_INLINE int
-generic_process_event_and_setup(struct pt_regs *ctx,
-				struct bpf_map_def *heap_map,
-				struct bpf_map_def *tailcals,
-				struct bpf_map_def *config_map,
-				struct bpf_map_def *data_heap)
+generic_process_event_and_setup(struct pt_regs *ctx, struct bpf_map_def *tailcals)
 {
 	struct msg_generic_kprobe *e;
 	struct event_config *config;
@@ -161,11 +155,11 @@ generic_process_event_and_setup(struct pt_regs *ctx,
 	long ty __maybe_unused;
 
 	/* Pid/Ktime Passed through per cpu map in process heap. */
-	e = map_lookup_elem(heap_map, &zero);
+	e = map_lookup_elem(&process_call_heap, &zero);
 	if (!e)
 		return 0;
 
-	config = map_lookup_elem(config_map, &e->idx);
+	config = map_lookup_elem(&config_map, &e->idx);
 	if (!config)
 		return 0;
 
@@ -219,7 +213,464 @@ generic_process_event_and_setup(struct pt_regs *ctx,
 	generic_process_init(e, MSG_OP_GENERIC_UPROBE, config);
 #endif
 
-	return generic_process_event(ctx, heap_map, tailcals, config_map, data_heap);
+	return generic_process_event(ctx, tailcals);
 }
 
+FUNC_LOCAL __u32
+do_action(void *ctx, __u32 i, struct selector_action *actions, bool *post)
+{
+	int signal __maybe_unused = FGS_SIGKILL;
+	int action = actions->act[i];
+	struct msg_generic_kprobe *e;
+	__s32 error, *error_p;
+	int fdi, namei;
+	int newfdi, oldfdi;
+	int socki;
+	int argi __maybe_unused;
+	int err = 0;
+	int zero = 0;
+	__u64 id;
+
+	e = map_lookup_elem(&process_call_heap, &zero);
+	if (!e)
+		return 0;
+
+	switch (action) {
+	case ACTION_NOPOST:
+		*post = false;
+		break;
+	case ACTION_POST: {
+		__u64 ratelimit_interval __maybe_unused = actions->act[++i];
+		__u64 ratelimit_scope __maybe_unused = actions->act[++i];
+#ifdef __LARGE_BPF_PROG
+		if (rate_limit(ratelimit_interval, ratelimit_scope, e))
+			*post = false;
+#endif /* __LARGE_BPF_PROG */
+		__u32 kernel_stack_trace = actions->act[++i];
+
+		if (kernel_stack_trace) {
+			// Stack id 0 is valid so we need a flag.
+			e->common.flags |= MSG_COMMON_FLAG_KERNEL_STACKTRACE;
+			// We could use BPF_F_REUSE_STACKID to override old with new stack if
+			// same stack id. It means that if we have a collision and user space
+			// reads the old one too late, we are reading the wrong stack (the new,
+			// old one was overwritten).
+			//
+			// Here we just signal that there was a collision returning -EEXIST.
+			e->kernel_stack_id = get_stackid(ctx, &stack_trace_map, 0);
+		}
+
+		__u32 user_stack_trace = actions->act[++i];
+
+		if (user_stack_trace) {
+			e->common.flags |= MSG_COMMON_FLAG_USER_STACKTRACE;
+			e->user_stack_id = get_stackid(ctx, &stack_trace_map, BPF_F_USER_STACK);
+		}
+#ifdef __LARGE_MAP_KEYS
+		__u32 ima_hash = actions->act[++i];
+
+		if (ima_hash)
+			e->common.flags |= MSG_COMMON_FLAG_IMA_HASH;
+#endif
+		break;
+	}
+
+	case ACTION_UNFOLLOWFD:
+	case ACTION_FOLLOWFD:
+		fdi = actions->act[++i];
+		namei = actions->act[++i];
+		err = installfd(e, fdi, namei, action == ACTION_FOLLOWFD);
+		break;
+	case ACTION_COPYFD:
+		oldfdi = actions->act[++i];
+		newfdi = actions->act[++i];
+		err = copyfd(e, oldfdi, newfdi);
+		break;
+	case ACTION_SIGNAL:
+		signal = actions->act[++i];
+	case ACTION_SIGKILL:
+		do_action_signal(signal);
+		break;
+	case ACTION_OVERRIDE:
+		error = actions->act[++i];
+		id = get_current_pid_tgid();
+
+		/*
+		 * TODO: this should not happen, it means that the override
+		 * program was not executed for some reason, we should warn
+		 * about it here
+		 */
+		error_p = map_lookup_elem(&override_tasks, &id);
+		if (error_p)
+			*error_p = error;
+		else
+			map_update_elem(&override_tasks, &id, &error, BPF_ANY);
+		break;
+	case ACTION_GETURL:
+	case ACTION_DNSLOOKUP:
+		/* Set the URL or DNS action */
+		e->action_arg_id = actions->act[++i];
+		break;
+	case ACTION_TRACKSOCK:
+	case ACTION_UNTRACKSOCK:
+		socki = actions->act[++i];
+		err = tracksock(e, socki, action == ACTION_TRACKSOCK);
+		break;
+	case ACTION_NOTIFY_ENFORCER:
+		error = actions->act[++i];
+		signal = actions->act[++i];
+		argi = actions->act[++i];
+		do_action_notify_enforcer(e, error, signal, argi);
+		break;
+	case ACTION_CLEANUP_ENFORCER_NOTIFICATION:
+		do_enforcer_cleanup();
+	default:
+		break;
+	}
+	if (!err) {
+		e->action = action;
+		return ++i;
+	}
+	return 0;
+}
+
+FUNC_INLINE bool
+has_action(struct selector_action *actions, __u32 idx)
+{
+	__u32 offset = idx * sizeof(__u32) + sizeof(*actions);
+
+	return offset < actions->actionlen;
+}
+
+/* Currently supporting 2 actions for selector. */
+FUNC_INLINE bool
+do_actions(void *ctx, struct selector_action *actions)
+{
+	bool post = true;
+	__u32 l, i = 0;
+
+#ifndef __LARGE_BPF_PROG
+#pragma unroll
+#endif
+	for (l = 0; l < MAX_ACTIONS; l++) {
+		if (!has_action(actions, i))
+			break;
+		i = do_action(ctx, i, actions, &post);
+	}
+
+	return post;
+}
+
+FUNC_INLINE long
+generic_actions(void *ctx, struct bpf_map_def *calls)
+{
+	struct selector_arg_filters *arg;
+	struct selector_action *actions;
+	struct msg_generic_kprobe *e;
+	int actoff, pass, zero = 0;
+	bool postit;
+	__u8 *f;
+
+	e = map_lookup_elem(&process_call_heap, &zero);
+	if (!e)
+		return 0;
+
+	pass = e->pass;
+	if (pass <= 1)
+		return 0;
+
+	f = map_lookup_elem(&filter_map, &e->idx);
+	if (!f)
+		return 0;
+
+	asm volatile("%[pass] &= 0x7ff;\n"
+		     : [pass] "+r"(pass)
+		     :);
+	arg = (struct selector_arg_filters *)&f[pass];
+
+	actoff = pass + arg->arglen;
+	asm volatile("%[actoff] &= 0x7ff;\n"
+		     : [actoff] "+r"(actoff)
+		     :);
+	actions = (struct selector_action *)&f[actoff];
+
+	postit = do_actions(ctx, actions);
+	if (postit)
+		tail_call(ctx, calls, TAIL_CALL_SEND);
+	return postit;
+}
+
+FUNC_INLINE long
+generic_output(void *ctx, u8 op)
+{
+	struct msg_generic_kprobe *e;
+	int zero = 0;
+	size_t total;
+
+	e = map_lookup_elem(&process_call_heap, &zero);
+	if (!e)
+		return 0;
+
+/* We don't need this data in return kprobe event */
+#ifndef GENERIC_KRETPROBE
+#ifdef __NS_CHANGES_FILTER
+	/* update the namespaces if we matched a change on that */
+	if (e->sel.match_ns) {
+		__u32 pid = (get_current_pid_tgid() >> 32);
+		struct task_struct *task =
+			(struct task_struct *)get_current_task();
+		struct execve_map_value *enter = execve_map_get_noinit(
+			pid); // we don't want to init that if it does not exist
+		if (enter)
+			get_namespaces(&enter->ns, task);
+	}
+#endif
+#ifdef __CAP_CHANGES_FILTER
+	/* update the capabilities if we matched a change on that */
+	if (e->sel.match_cap) {
+		__u32 pid = (get_current_pid_tgid() >> 32);
+		struct task_struct *task =
+			(struct task_struct *)get_current_task();
+		struct execve_map_value *enter = execve_map_get_noinit(
+			pid); // we don't want to init that if it does not exist
+		if (enter)
+			get_current_subj_caps(&enter->caps, task);
+	}
+#endif
+#endif // !GENERIC_KRETPROBE
+
+	total = e->common.size + generic_kprobe_common_size();
+	/* Code movement from clang forces us to inline bounds checks here */
+	asm volatile("%[total] &= 0x7fff;\n"
+		     "if %[total] < 9000 goto +1\n;"
+		     "%[total] = 9000;\n"
+		     : [total] "+r"(total));
+	perf_event_output_metric(ctx, op, &tcpmon_map, BPF_F_CURRENT_CPU, e, total);
+	return 0;
+}
+
+FUNC_INLINE int generic_retkprobe(void *ctx, struct bpf_map_def *calls, unsigned long ret)
+{
+	struct execve_map_value *enter;
+	struct msg_generic_kprobe *e;
+	struct retprobe_info info;
+	struct event_config *config;
+	bool walker = false;
+	int zero = 0;
+	__u32 ppid;
+	long size = 0;
+	long ty_arg, do_copy;
+	__u64 pid_tgid;
+
+	e = map_lookup_elem(&process_call_heap, &zero);
+	if (!e)
+		return 0;
+
+	e->idx = get_index(ctx);
+
+	config = map_lookup_elem(&config_map, &e->idx);
+	if (!config)
+		return 0;
+
+	e->func_id = config->func_id;
+	e->retprobe_id = retprobe_map_get_key(ctx);
+	pid_tgid = get_current_pid_tgid();
+	e->tid = (__u32)pid_tgid;
+
+	if (!retprobe_map_get(e->func_id, e->retprobe_id, &info))
+		return 0;
+
+	*(unsigned long *)e->args = info.ktime_enter;
+	size += sizeof(info.ktime_enter);
+
+	ty_arg = config->argreturn;
+	do_copy = config->argreturncopy;
+	if (ty_arg) {
+		size += read_call_arg(ctx, e, 0, ty_arg, size, ret, 0, data_heap_ptr);
+#ifdef __LARGE_BPF_PROG
+		struct socket_owner owner;
+
+		switch (config->argreturnaction) {
+		case ACTION_TRACKSOCK:
+			owner.pid = e->current.pid;
+			owner.tid = e->tid;
+			owner.ktime = e->current.ktime;
+			map_update_elem(&socktrack_map, &ret, &owner, BPF_ANY);
+			break;
+		case ACTION_UNTRACKSOCK:
+			map_delete_elem(&socktrack_map, &ret);
+			break;
+		}
+#endif
+	}
+
+	/*
+	 * 0x1000 should be maximum argument length, so masking
+	 * with 0x1fff is safe and verifier will be happy.
+	 */
+	asm volatile("%[size] &= 0x1fff;\n"
+		     : [size] "+r"(size));
+
+	switch (do_copy) {
+	case char_buf:
+		size += __copy_char_buf(ctx, size, info.ptr, ret, false, e, data_heap_ptr);
+		break;
+	case char_iovec:
+		size += __copy_char_iovec(size, info.ptr, info.cnt, ret, e);
+	default:
+		break;
+	}
+
+	/* Complete message header and send */
+	enter = event_find_curr(&ppid, &walker);
+
+	e->common.op = MSG_OP_GENERIC_KPROBE;
+	e->common.flags |= MSG_COMMON_FLAG_RETURN;
+	e->common.pad[0] = 0;
+	e->common.pad[1] = 0;
+	e->common.size = size;
+	e->common.ktime = ktime_get_ns();
+
+	if (enter) {
+		e->current.pid = enter->key.pid;
+		e->current.ktime = enter->key.ktime;
+	}
+	e->current.pad[0] = 0;
+	e->current.pad[1] = 0;
+	e->current.pad[2] = 0;
+	e->current.pad[3] = 0;
+
+	e->func_id = config->func_id;
+	e->common.size = size;
+
+	tail_call(ctx, calls, TAIL_CALL_ARGS);
+	return 1;
+}
+
+// generic_process_filter performs first pass filtering based on pid/nspid.
+// We keep a list of selectors that pass.
+//
+// If the filter check was successful, it will return PFILTER_ACCEPT and
+// properly set the values of:
+//    current->pid
+//    current->ktime
+// for the memory located at index 0 of the process_call_heap map, assuming
+// the value follows the msg_generic_hdr structure.
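+//
+// Note that the heap and filter maps are no longer passed in as arguments:
+// the function reads the shared process_call_heap and filter_map
+// definitions pulled in through generic_maps.h.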
+FUNC_INLINE int generic_process_filter(void)
+{
+	struct execve_map_value *enter;
+	struct msg_generic_kprobe *msg;
+	struct msg_execve_key *current;
+	struct msg_selector_data *sel;
+	int curr, zero = 0;
+	bool walker = 0;
+	__u32 ppid;
+
+	msg = map_lookup_elem(&process_call_heap, &zero);
+	if (!msg)
+		return 0;
+
+	enter = event_find_curr(&ppid, &walker);
+	if (enter) {
+		int selectors, pass;
+		__u32 *f = map_lookup_elem(&filter_map, &msg->idx);
+
+		if (!f)
+			return PFILTER_ERROR;
+
+		sel = &msg->sel;
+		current = &msg->current;
+
+		curr = sel->curr;
+		if (curr > MAX_SELECTORS)
+			return process_filter_done(sel, enter, current);
+
+		selectors = f[0];
+		/* If there are no selectors, accept the process */
+		if (!selectors) {
+			sel->pass = true;
+			return process_filter_done(sel, enter, current);
+		}
+
+		/* If we get here with a reference to an uninitialized selector, drop */
+		if (selectors <= curr)
+			return process_filter_done(sel, enter, current);
+
+		pass = selector_process_filter(f, curr, enter, msg);
+		if (pass) {
+			/* Verifier lost that msg is not null here so recheck */
+			asm volatile("%[curr] &= 0x1f;\n"
+				     : [curr] "+r"(curr));
+			sel->active[curr] = true;
+			sel->active[SELECTORS_ACTIVE] = true;
+			sel->pass |= true;
+		}
+		sel->curr++;
+		if (sel->curr > selectors)
+			return process_filter_done(sel, enter, current);
+		return PFILTER_CONTINUE; /* will iterate to the next selector */
+	}
+	return PFILTER_CURR_NOT_FOUND;
+}
+
+FUNC_INLINE int filter_args(struct msg_generic_kprobe *e, int selidx, bool is_entry)
+{
+	__u8 *f;
+
+	/* No filters and no selectors, so just accept */
+	f = map_lookup_elem(&filter_map, &e->idx);
+	if (!f)
+		return 1;
+
+	/* No selectors, accept by default */
+	if (!e->sel.active[SELECTORS_ACTIVE])
+		return 1;
+
+	/* We ran process filters early as a prefilter to drop unrelated
+	 * events early. Now we need to ensure that active pid selectors
+	 * have their arg filters run.
+	 */
+	if (selidx > SELECTORS_ACTIVE)
+		return filter_args_reject(e->func_id);
+
+	if (e->sel.active[selidx]) {
+		int pass = selector_arg_offset(f, e, selidx, is_entry);
+
+		if (pass)
+			return pass;
+	}
+	return 0;
+}
+
+FUNC_INLINE long generic_filter_arg(void *ctx, struct bpf_map_def *tailcalls,
+				    bool is_entry)
+{
+	struct msg_generic_kprobe *e;
+	int selidx, pass, zero = 0;
+
+	e = map_lookup_elem(&process_call_heap, &zero);
+	if (!e)
+		return 0;
+	selidx = e->tailcall_index_selector;
+	pass = filter_args(e, selidx & MAX_SELECTORS_MASK, is_entry);
+	if (!pass) {
+		selidx++;
+		if (selidx <= MAX_SELECTORS && e->sel.active[selidx & MAX_SELECTORS_MASK]) {
+			e->tailcall_index_selector = selidx;
+			tail_call(ctx, tailcalls, TAIL_CALL_ARGS);
+		}
+		// reject if we did not attempt to tailcall, or if tailcall failed.
+		return filter_args_reject(e->func_id);
+	}
+
+	// If pass > 1 then we need to consult the selector actions;
+	// otherwise pass == 1 indicates using the default action.
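+	// TAIL_CALL_ACTIONS runs the actions program (generic_*_actions),
+	// which itself tail calls TAIL_CALL_SEND when the event should post.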
+	if (pass > 1) {
+		e->pass = pass;
+		tail_call(ctx, tailcalls, TAIL_CALL_ACTIONS);
+	}
+
+	tail_call(ctx, tailcalls, TAIL_CALL_SEND);
+	return 0;
+}
 #endif /* __GENERIC_CALLS_H__ */
diff --git a/bpf/process/generic_maps.h b/bpf/process/generic_maps.h
new file mode 100644
index 00000000000..193b177e68a
--- /dev/null
+++ b/bpf/process/generic_maps.h
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright Authors of Cilium */
+
+#ifndef __GENERIC_MAPS_H__
+#define __GENERIC_MAPS_H__
+
+#include "lib/data_msg.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct msg_generic_kprobe);
+} process_call_heap SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 1); // will be resized by agent when needed
+	__type(key, __u64);
+	__type(value, __s32);
+} override_tasks SEC(".maps");
+
+#ifdef __LARGE_BPF_PROG
+#if defined(GENERIC_TRACEPOINT) || defined(GENERIC_UPROBE)
+#define data_heap_ptr 0
+#else
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct msg_data);
+} data_heap SEC(".maps");
+#define data_heap_ptr (struct bpf_map_def *)&data_heap
+#endif
+#else
+#define data_heap_ptr 0
+#endif
+
+struct filter_map_value {
+	unsigned char buf[FILTER_SIZE];
+};
+
+/* Arrays of size 1 will be rewritten to direct loads in verifier */
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, int);
+	__type(value, struct filter_map_value);
+} filter_map SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct event_config);
+} config_map SEC(".maps");
+
+#endif // __GENERIC_MAPS_H__
diff --git a/bpf/process/pfilter.h b/bpf/process/pfilter.h
index 4bd630d05a3..eb943da93c3 100644
--- a/bpf/process/pfilter.h
+++ b/bpf/process/pfilter.h
@@ -2,8 +2,7 @@
 #define __PFILTER_H__
 
 /**
- * Process filters
- * see generic_process_filter below
+ * Process filters (see generic_process_filter)
  */
 
 #define FIND_PIDSET(value, isns)	\
@@ -540,72 +539,4 @@ process_filter_done(struct msg_selector_data *sel,
 		return PFILTER_ACCEPT;
 	return PFILTER_REJECT;
 }
-
-// generic_process_filter performs first pass filtering based on pid/nspid.
-// We keep a list of selectors that pass.
-//
-// if filter check was successful, it will return PFILTER_ACCEPT and properly
-// set the values of:
-//    current->pid
-//    current->ktime
-// for the memory located at index 0 of @msg_heap assuming the value follows the
-// msg_generic_hdr structure.
-FUNC_INLINE int
-generic_process_filter(struct bpf_map_def *heap, struct bpf_map_def *fmap)
-{
-	struct execve_map_value *enter;
-	struct msg_generic_kprobe *msg;
-	struct msg_execve_key *current;
-	struct msg_selector_data *sel;
-	int curr, zero = 0;
-	bool walker = 0;
-	__u32 ppid;
-
-	msg = map_lookup_elem(heap, &zero);
-	if (!msg)
-		return 0;
-
-	enter = event_find_curr(&ppid, &walker);
-	if (enter) {
-		int selectors, pass;
-		__u32 *f = map_lookup_elem(fmap, &msg->idx);
-
-		if (!f)
-			return PFILTER_ERROR;
-
-		sel = &msg->sel;
-		current = &msg->current;
-
-		curr = sel->curr;
-		if (curr > MAX_SELECTORS)
-			return process_filter_done(sel, enter, current);
-
-		selectors = f[0];
-		/* If no selectors accept process */
-		if (!selectors) {
-			sel->pass = true;
-			return process_filter_done(sel, enter, current);
-		}
-
-		/* If we get here with reference to uninitialized selector drop */
-		if (selectors <= curr)
-			return process_filter_done(sel, enter, current);
-
-		pass = selector_process_filter(f, curr, enter, msg);
-		if (pass) {
-			/* Verify lost that msg is not null here so recheck */
-			asm volatile("%[curr] &= 0x1f;\n"
-				     : [curr] "+r"(curr));
-			sel->active[curr] = true;
-			sel->active[SELECTORS_ACTIVE] = true;
-			sel->pass |= true;
-		}
-		sel->curr++;
-		if (sel->curr > selectors)
-			return process_filter_done(sel, enter, current);
-		return PFILTER_CONTINUE; /* will iterate to the next selector */
-	}
-	return PFILTER_CURR_NOT_FOUND;
-}
-
 #endif /* __PFILTER_H__ */
diff --git a/bpf/process/types/basic.h b/bpf/process/types/basic.h
index cd7fcb2abb6..13a280b1ed4 100644
--- a/bpf/process/types/basic.h
+++ b/bpf/process/types/basic.h
@@ -124,14 +124,6 @@ enum {
 	TAIL_CALL_SEND = 5,
 };
 
-struct generic_maps {
-	struct bpf_map_def *heap;
-	struct bpf_map_def *calls;
-	struct bpf_map_def *config;
-	struct bpf_map_def *filter;
-	struct bpf_map_def *override;
-};
-
 struct selector_action {
 	__u32 actionlen;
 	__u32 act[];
@@ -1769,37 +1761,6 @@ FUNC_INLINE int filter_args_reject(u64 id)
 	return 0;
 }
 
-FUNC_INLINE int
-filter_args(struct msg_generic_kprobe *e, int selidx, void *filter_map,
-	    bool is_entry)
-{
-	__u8 *f;
-
-	/* No filters and no selectors so just accepts */
-	f = map_lookup_elem(filter_map, &e->idx);
-	if (!f) {
-		return 1;
-	}
-
-	/* No selectors, accept by default */
-	if (!e->sel.active[SELECTORS_ACTIVE])
-		return 1;
-
-	/* We ran process filters early as a prefilter to drop unrelated
-	 * events early. Now we need to ensure that active pid sselectors
-	 * have their arg filters run.
-	 */
-	if (selidx > SELECTORS_ACTIVE)
-		return filter_args_reject(e->func_id);
-
-	if (e->sel.active[selidx]) {
-		int pass = selector_arg_offset(f, e, selidx, is_entry);
-		if (pass)
-			return pass;
-	}
-	return 0;
-}
-
 struct fdinstall_key {
 	__u64 tid;
 	__u32 fd;
@@ -2172,277 +2133,6 @@ FUNC_INLINE void do_action_notify_enforcer(struct msg_generic_kprobe *e,
 #define do_action_notify_enforcer(e, error, signal, info_arg_id)
 #endif
 
-FUNC_LOCAL __u32
-do_action(void *ctx, __u32 i, struct selector_action *actions,
-	  struct generic_maps *maps, bool *post)
-{
-	struct bpf_map_def *override_tasks = maps->override;
-	int signal __maybe_unused = FGS_SIGKILL;
-	int action = actions->act[i];
-	struct msg_generic_kprobe *e;
-	__s32 error, *error_p;
-	int fdi, namei;
-	int newfdi, oldfdi;
-	int socki;
-	int argi __maybe_unused;
-	int err = 0;
-	int zero = 0;
-	__u64 id;
-
-	e = map_lookup_elem(maps->heap, &zero);
-	if (!e)
-		return 0;
-
-	switch (action) {
-	case ACTION_NOPOST:
-		*post = false;
-		break;
-	case ACTION_POST: {
-		__u64 ratelimit_interval __maybe_unused = actions->act[++i];
-		__u64 ratelimit_scope __maybe_unused = actions->act[++i];
-#ifdef __LARGE_BPF_PROG
-		if (rate_limit(ratelimit_interval, ratelimit_scope, e))
-			*post = false;
-#endif /* __LARGE_BPF_PROG */
-		__u32 kernel_stack_trace = actions->act[++i];
-
-		if (kernel_stack_trace) {
-			// Stack id 0 is valid so we need a flag.
-			e->common.flags |= MSG_COMMON_FLAG_KERNEL_STACKTRACE;
-			// We could use BPF_F_REUSE_STACKID to override old with new stack if
-			// same stack id. It means that if we have a collision and user space
-			// reads the old one too late, we are reading the wrong stack (the new,
-			// old one was overwritten).
-			//
-			// Here we just signal that there was a collision returning -EEXIST.
-			e->kernel_stack_id = get_stackid(ctx, &stack_trace_map, 0);
-		}
-
-		__u32 user_stack_trace = actions->act[++i];
-
-		if (user_stack_trace) {
-			e->common.flags |= MSG_COMMON_FLAG_USER_STACKTRACE;
-			e->user_stack_id = get_stackid(ctx, &stack_trace_map, BPF_F_USER_STACK);
-		}
-#ifdef __LARGE_MAP_KEYS
-		__u32 ima_hash = actions->act[++i];
-
-		if (ima_hash)
-			e->common.flags |= MSG_COMMON_FLAG_IMA_HASH;
-#endif
-		break;
-	}
-
-	case ACTION_UNFOLLOWFD:
-	case ACTION_FOLLOWFD:
-		fdi = actions->act[++i];
-		namei = actions->act[++i];
-		err = installfd(e, fdi, namei, action == ACTION_FOLLOWFD);
-		break;
-	case ACTION_COPYFD:
-		oldfdi = actions->act[++i];
-		newfdi = actions->act[++i];
-		err = copyfd(e, oldfdi, newfdi);
-		break;
-	case ACTION_SIGNAL:
-		signal = actions->act[++i];
-	case ACTION_SIGKILL:
-		do_action_signal(signal);
-		break;
-	case ACTION_OVERRIDE:
-		error = actions->act[++i];
-		id = get_current_pid_tgid();
-
-		if (!override_tasks)
-			break;
-		/*
-		 * TODO: this should not happen, it means that the override
-		 * program was not executed for some reason, we should do
-		 * warning in here
-		 */
-		error_p = map_lookup_elem(override_tasks, &id);
-		if (error_p)
-			*error_p = error;
-		else
-			map_update_elem(override_tasks, &id, &error, BPF_ANY);
-		break;
-	case ACTION_GETURL:
-	case ACTION_DNSLOOKUP:
-		/* Set the URL or DNS action */
-		e->action_arg_id = actions->act[++i];
-		break;
-	case ACTION_TRACKSOCK:
-	case ACTION_UNTRACKSOCK:
-		socki = actions->act[++i];
-		err = tracksock(e, socki, action == ACTION_TRACKSOCK);
-		break;
-	case ACTION_NOTIFY_ENFORCER:
-		error = actions->act[++i];
-		signal = actions->act[++i];
-		argi = actions->act[++i];
-		do_action_notify_enforcer(e, error, signal, argi);
-		break;
-	case ACTION_CLEANUP_ENFORCER_NOTIFICATION:
-		do_enforcer_cleanup();
-	default:
-		break;
-	}
-	if (!err) {
-		e->action = action;
-		return ++i;
-	}
-	return 0;
-}
-
-FUNC_INLINE bool
-has_action(struct selector_action *actions, __u32 idx)
-{
-	__u32 offset = idx * sizeof(__u32) + sizeof(*actions);
-
-	return offset < actions->actionlen;
-}
-
-/* Currently supporting 2 actions for selector. */
-FUNC_INLINE bool
-do_actions(void *ctx, struct selector_action *actions, struct generic_maps *maps)
-{
-	bool post = true;
-	__u32 l, i = 0;
-
-#ifndef __LARGE_BPF_PROG
-#pragma unroll
-#endif
-	for (l = 0; l < MAX_ACTIONS; l++) {
-		if (!has_action(actions, i))
-			break;
-		i = do_action(ctx, i, actions, maps, &post);
-	}
-
-	return post;
-}
-
-FUNC_INLINE long
-filter_read_arg(void *ctx, struct bpf_map_def *heap,
-		struct bpf_map_def *filter, struct bpf_map_def *tailcalls,
-		struct bpf_map_def *config_map, bool is_entry)
-{
-	struct msg_generic_kprobe *e;
-	int selidx, pass, zero = 0;
-
-	e = map_lookup_elem(heap, &zero);
-	if (!e)
-		return 0;
-	selidx = e->tailcall_index_selector;
-	pass = filter_args(e, selidx & MAX_SELECTORS_MASK, filter, is_entry);
-	if (!pass) {
-		selidx++;
-		if (selidx <= MAX_SELECTORS && e->sel.active[selidx & MAX_SELECTORS_MASK]) {
-			e->tailcall_index_selector = selidx;
-			tail_call(ctx, tailcalls, TAIL_CALL_ARGS);
-		}
-		// reject if we did not attempt to tailcall, or if tailcall failed.
-		return filter_args_reject(e->func_id);
-	}
-
-	// If pass >1 then we need to consult the selector actions
-	// otherwise pass==1 indicates using default action.
-	if (pass > 1) {
-		e->pass = pass;
-		tail_call(ctx, tailcalls, TAIL_CALL_ACTIONS);
-	}
-
-	tail_call(ctx, tailcalls, TAIL_CALL_SEND);
-	return 0;
-}
-
-FUNC_INLINE long
-generic_actions(void *ctx, struct generic_maps *maps)
-{
-	struct selector_arg_filters *arg;
-	struct selector_action *actions;
-	struct msg_generic_kprobe *e;
-	int actoff, pass, zero = 0;
-	bool postit;
-	__u8 *f;
-
-	e = map_lookup_elem(maps->heap, &zero);
-	if (!e)
-		return 0;
-
-	pass = e->pass;
-	if (pass <= 1)
-		return 0;
-
-	f = map_lookup_elem(maps->filter, &e->idx);
-	if (!f)
-		return 0;
-
-	asm volatile("%[pass] &= 0x7ff;\n"
-		     : [pass] "+r"(pass)
-		     :);
-	arg = (struct selector_arg_filters *)&f[pass];
-
-	actoff = pass + arg->arglen;
-	asm volatile("%[actoff] &= 0x7ff;\n"
-		     : [actoff] "+r"(actoff)
-		     :);
-	actions = (struct selector_action *)&f[actoff];
-
-	postit = do_actions(ctx, actions, maps);
-	if (postit)
-		tail_call(ctx, maps->calls, TAIL_CALL_SEND);
-	return postit;
-}
-
-FUNC_INLINE long
-generic_output(void *ctx, struct bpf_map_def *heap, u8 op)
-{
-	struct msg_generic_kprobe *e;
-	int zero = 0;
-	size_t total;
-
-	e = map_lookup_elem(heap, &zero);
-	if (!e)
-		return 0;
-
-/* We don't need this data in return kprobe event */
-#ifndef GENERIC_KRETPROBE
-#ifdef __NS_CHANGES_FILTER
-	/* update the namespaces if we matched a change on that */
-	if (e->sel.match_ns) {
-		__u32 pid = (get_current_pid_tgid() >> 32);
-		struct task_struct *task =
-			(struct task_struct *)get_current_task();
-		struct execve_map_value *enter = execve_map_get_noinit(
-			pid); // we don't want to init that if it does not exist
-		if (enter)
-			get_namespaces(&(enter->ns), task);
-	}
-#endif
-#ifdef __CAP_CHANGES_FILTER
-	/* update the capabilities if we matched a change on that */
-	if (e->sel.match_cap) {
-		__u32 pid = (get_current_pid_tgid() >> 32);
-		struct task_struct *task =
-			(struct task_struct *)get_current_task();
-		struct execve_map_value *enter = execve_map_get_noinit(
-			pid); // we don't want to init that if it does not exist
-		if (enter)
-			get_current_subj_caps(&enter->caps, task);
-	}
-#endif
-#endif // !GENERIC_KRETPROBE
-
-	total = e->common.size + generic_kprobe_common_size();
-	/* Code movement from clang forces us to inline bounds checks here */
-	asm volatile("%[total] &= 0x7fff;\n"
-		     "if %[total] < 9000 goto +1\n;"
-		     "%[total] = 9000;\n"
-		     : [total] "+r"(total));
-	perf_event_output_metric(ctx, op, &tcpmon_map, BPF_F_CURRENT_CPU, e, total);
-	return 0;
-}
-
 /**
  * Read a generic argument
  *
diff --git a/pkg/sensors/tracing/tracepoint_test.go b/pkg/sensors/tracing/tracepoint_test.go
index 27c10ee10ac..57f6e965421 100644
--- a/pkg/sensors/tracing/tracepoint_test.go
+++ b/pkg/sensors/tracing/tracepoint_test.go
@@ -438,7 +438,7 @@ func TestLoadTracepointSensor(t *testing.T) {
 	var sensorMaps = []tus.SensorMap{
 		// all programs
-		tus.SensorMap{Name: "tp_heap", Progs: []uint{0, 1, 2, 3, 4, 5}},
+		tus.SensorMap{Name: "process_call_heap", Progs: []uint{0, 1, 2, 3, 4, 5}},
 
 		// all but generic_tracepoint_output
 		tus.SensorMap{Name: "tp_calls", Progs: []uint{0, 1, 2, 3, 4}},
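Aside for readers new to this layout: the core pattern of this series, defining BPF maps once in a shared header and letting every program that includes it reference them by name, can be shown with a minimal, self-contained sketch in libbpf-style C. All names below (shared_maps.h, struct scratch, scratch_heap, get_scratch, example_prog) are hypothetical illustrations, not Tetragon code.

/* shared_maps.h: one map definition, included by every program object */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct scratch {
	__u32 pid;
	__u64 ktime;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct scratch);
} scratch_heap SEC(".maps");

/* prog.c: helpers reference the map directly by name, so no struct of
 * map pointers (the removed struct generic_maps) has to be threaded
 * through every call.
 */
static __always_inline struct scratch *get_scratch(void)
{
	__u32 zero = 0;

	/* per-CPU scratch slot, index 0 of a single-entry percpu array */
	return bpf_map_lookup_elem(&scratch_heap, &zero);
}

SEC("kprobe/example")
int example_prog(struct pt_regs *ctx)
{
	struct scratch *s = get_scratch();

	if (!s)
		return 0;
	s->pid = bpf_get_current_pid_tgid() >> 32;
	s->ktime = bpf_ktime_get_ns();
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";

Note that the shared header only deduplicates the source-level definitions; whether the loaded map instances end up shared across separately loaded objects remains a loader concern, as it was before this change.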