From e790789a4ccd92cd085710298017cf72f63fe484 Mon Sep 17 00:00:00 2001
From: Manny Wang
Date: Tue, 29 Aug 2023 06:41:52 +0000
Subject: [PATCH] set the dropping-mode sampling ratio for individual syscalls

---
 driver/bpf/fillers.h                           |  2 +-
 driver/bpf/plumbing_helpers.h                  | 17 +++-
 driver/bpf/types.h                             |  4 +-
 driver/main.c                                  | 95 +++++++++++++++----
 driver/modern_bpf/helpers/base/maps_getters.h  | 12 +--
 .../helpers/interfaces/attached_programs.h     | 15 ++-
 driver/modern_bpf/maps/maps.h                  |  2 +-
 .../events/custom_logic/drop.bpf.c             |  4 +-
 .../shared_definitions/struct_definitions.h    |  3 +-
 driver/ppm_consumer.h                          |  6 +-
 driver/ppm_events_public.h                     |  1 +
 driver/ppm_fillers.c                           |  2 +-
 userspace/libpman/include/libpman.h            | 15 ++-
 userspace/libpman/src/maps.c                   | 21 +++-
 userspace/libscap/engine/bpf/scap_bpf.c        | 51 +++++++++-
 userspace/libscap/engine/kmod/scap_kmod.c      | 26 +++++
 .../engine/modern_bpf/scap_modern_bpf.c        | 16 +++-
 userspace/libscap/scap.c                       | 12 +++
 userspace/libscap/scap.h                       |  9 ++
 userspace/libscap/scap_vtable.h                |  7 ++
 20 files changed, 261 insertions(+), 59 deletions(-)

diff --git a/driver/bpf/fillers.h b/driver/bpf/fillers.h
index 138c07628ec..ae9173b351f 100644
--- a/driver/bpf/fillers.h
+++ b/driver/bpf/fillers.h
@@ -4939,7 +4939,7 @@ FILLER(sched_drop, false)
 	/*
 	 * ratio
 	 */
-	return bpf_push_u32_to_ring(data, data->settings->sampling_ratio);
+	return bpf_push_u32_to_ring(data, 0);
 }
 
 /* In this kernel version the instruction limit was bumped to 1000000 */
diff --git a/driver/bpf/plumbing_helpers.h b/driver/bpf/plumbing_helpers.h
index f111e54e6a5..471f52ccfbf 100644
--- a/driver/bpf/plumbing_helpers.h
+++ b/driver/bpf/plumbing_helpers.h
@@ -493,6 +493,7 @@ static __always_inline bool drop_event(void *ctx,
 					struct scap_bpf_settings *settings,
 					enum syscall_flags drop_flags)
 {
+	long id;
 	if (!settings->dropping_mode)
 		return false;
 
@@ -563,10 +564,16 @@ static __always_inline bool drop_event(void *ctx,
 	if (drop_flags & UF_ALWAYS_DROP)
 		return true;
 
+	id = bpf_syscall_get_nr(ctx);
+	if (id < 0 || id >= SYSCALL_TABLE_SIZE)
+	{
+		return false;
+	}
+
 	if (state->tail_ctx.ts % 1000000000 >= 1000000000 /
-	    settings->sampling_ratio) {
-		if (!settings->is_dropping) {
-			settings->is_dropping = true;
+	    settings->sampling_ratio[id]) {
+		if (!settings->is_dropping[id]) {
+			settings->is_dropping[id] = true;
 			state->tail_ctx.evt_type = PPME_DROP_E;
 			return false;
 		}
@@ -574,8 +581,8 @@ static __always_inline bool drop_event(void *ctx,
 		return true;
 	}
 
-	if (settings->is_dropping) {
-		settings->is_dropping = false;
+	if (settings->is_dropping[id]) {
+		settings->is_dropping[id] = false;
 		state->tail_ctx.evt_type = PPME_DROP_X;
 		return false;
 	}
diff --git a/driver/bpf/types.h b/driver/bpf/types.h
index b8ac747cc57..4d1e095fe58 100644
--- a/driver/bpf/types.h
+++ b/driver/bpf/types.h
@@ -233,10 +233,10 @@ struct scap_bpf_settings {
 	uint64_t boot_time;
 	void *socket_file_ops;
 	uint32_t snaplen;
-	uint32_t sampling_ratio;
+	uint32_t sampling_ratio[SYSCALL_TABLE_SIZE];
 	bool do_dynamic_snaplen;
 	bool dropping_mode;
-	bool is_dropping;
+	bool is_dropping[SYSCALL_TABLE_SIZE];
 	bool drop_failed;
 	bool tracers_enabled;
 	uint16_t fullcapture_port_range_start;
diff --git a/driver/main.c b/driver/main.c
index 4cea5b04a4a..4db04db7973 100644
--- a/driver/main.c
+++ b/driver/main.c
@@ -394,6 +394,7 @@ static void check_remove_consumer(struct ppm_consumer_t *consumer, int remove_fr
 static int ppm_open(struct inode *inode, struct file *filp)
 {
 	int ret;
+	int i;
 	int in_list = false;
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20)
 	int ring_no = iminor(filp->f_path.dentry->d_inode);
@@ -531,9 +532,12 @@ static int ppm_open(struct inode *inode, struct file *filp)
 	 */
 	consumer->dropping_mode = 0;
 	consumer->snaplen = SNAPLEN;
-	consumer->sampling_ratio = 1;
-	consumer->sampling_interval = 0;
-	consumer->is_dropping = 0;
+	for (i = 0; i < SYSCALL_TABLE_SIZE; i++)
+	{
+		consumer->sampling_ratio[i] = 1;
+		consumer->sampling_interval[i] = 0;
+		consumer->is_dropping[i] = false;
+	}
 	consumer->do_dynamic_snaplen = false;
 	consumer->drop_failed = false;
 	consumer->need_to_insert_drop_e = 0;
@@ -956,17 +960,23 @@ static long ppm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case PPM_IOCTL_DISABLE_DROPPING_MODE:
 	{
+		int i;
 		vpr_info("PPM_IOCTL_DISABLE_DROPPING_MODE, consumer %p\n", consumer_id);
 
 		consumer->dropping_mode = 0;
-		consumer->sampling_interval = 1000000000;
-		consumer->sampling_ratio = 1;
+		for (i = 0; i < SYSCALL_TABLE_SIZE; i++)
+		{
+			consumer->sampling_interval[i] = 1000000000;
+			consumer->sampling_ratio[i] = 1;
+			consumer->is_dropping[i] = false;
+		}
 
 		ret = 0;
 		goto cleanup_ioctl;
 	}
 	case PPM_IOCTL_ENABLE_DROPPING_MODE:
 	{
+		int i;
 		u32 new_sampling_ratio;
 
 		consumer->dropping_mode = 1;
@@ -987,10 +997,13 @@ static long ppm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			goto cleanup_ioctl;
 		}
 
-		consumer->sampling_interval = 1000000000 / new_sampling_ratio;
-		consumer->sampling_ratio = new_sampling_ratio;
+		for (i = 0; i < SYSCALL_TABLE_SIZE; i++)
+		{
+			consumer->sampling_interval[i] = 1000000000 / new_sampling_ratio;
+			consumer->sampling_ratio[i] = new_sampling_ratio;
+		}
 
-		vpr_info("new sampling ratio: %d\n", new_sampling_ratio);
+		vpr_info("new default sampling ratio: %d\n", new_sampling_ratio);
 
 		ret = 0;
 		goto cleanup_ioctl;
@@ -1186,6 +1199,43 @@ static long ppm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		ret = 0;
 		goto cleanup_ioctl;
 	}
+	case PPM_IOCTL_SET_DROPPING_RATIO:
+	{
+		u32 syscall_to_set = (arg >> 32) - SYSCALL_TABLE_ID0;
+		u32 new_sampling_ratio = (u32)arg;
+
+		vpr_info("PPM_IOCTL_SET_DROPPING_RATIO, syscall(%u), ratio(%u), consumer %p\n", syscall_to_set, new_sampling_ratio, consumer_id);
+
+		if (syscall_to_set >= SYSCALL_TABLE_SIZE) {
+			pr_err("invalid syscall %u\n", syscall_to_set);
+			ret = -EINVAL;
+			goto cleanup_ioctl;
+		}
+
+		if ((g_syscall_table[syscall_to_set].flags & (UF_NEVER_DROP | UF_ALWAYS_DROP))) {
+			ret = -EPERM;
+			goto cleanup_ioctl;
+		}
+
+		if (new_sampling_ratio != 1 &&
+		    new_sampling_ratio != 2 &&
+		    new_sampling_ratio != 4 &&
+		    new_sampling_ratio != 8 &&
+		    new_sampling_ratio != 16 &&
+		    new_sampling_ratio != 32 &&
+		    new_sampling_ratio != 64 &&
+		    new_sampling_ratio != 128) {
+			pr_err("invalid sampling ratio %u\n", new_sampling_ratio);
+			ret = -EINVAL;
+			goto cleanup_ioctl;
+		}
+
+		consumer->sampling_interval[syscall_to_set] = 1000000000 / new_sampling_ratio;
+		consumer->sampling_ratio[syscall_to_set] = new_sampling_ratio;
+		pr_info("new sampling ratio %u, %u\n", syscall_to_set, new_sampling_ratio);
+		ret = 0;
+		goto cleanup_ioctl;
+	}
 	default:
 		ret = -ENOTTY;
 		goto cleanup_ioctl;
@@ -1660,6 +1710,7 @@ static inline int drop_nostate_event(ppm_event_code event_type,
 static inline int drop_event(struct ppm_consumer_t *consumer,
 			     ppm_event_code event_type,
 			     enum syscall_flags drop_flags,
+			     long table_index,
 			     nanoseconds ns,
 			     struct pt_regs *regs)
 {
@@ -1682,21 +1733,22 @@ static inline int drop_event(struct ppm_consumer_t *consumer,
 			ASSERT((drop_flags & UF_NEVER_DROP) == 0);
 			return 1;
 		}
 
+		if (table_index != -1) {
+			if (consumer->sampling_interval[table_index] < SECOND_IN_NS &&
+			    /* do_div replaces ns2 with the quotient and returns the remainder */
+			    do_div(ns2, SECOND_IN_NS) >= consumer->sampling_interval[table_index]) {
+				if (!consumer->is_dropping[table_index]) {
+					consumer->is_dropping[table_index] = true;
+					record_drop_e(consumer, ns, drop_flags);
+				}
-		if (consumer->sampling_interval < SECOND_IN_NS &&
-		    /* do_div replaces ns2 with the quotient and returns the remainder */
-		    do_div(ns2, SECOND_IN_NS) >= consumer->sampling_interval) {
-			if (consumer->is_dropping == 0) {
-				consumer->is_dropping = 1;
-				record_drop_e(consumer, ns, drop_flags);
+
+				return 1;
 			}
 
-			return 1;
-		}
-
-		if (consumer->is_dropping == 1) {
-			consumer->is_dropping = 0;
-			record_drop_x(consumer, ns, drop_flags);
+			if (consumer->is_dropping[table_index]) {
+				consumer->is_dropping[table_index] = false;
+				record_drop_x(consumer, ns, drop_flags);
+			}
 		}
 	}
@@ -1742,7 +1794,7 @@ static int record_event_consumer(struct ppm_consumer_t *consumer,
 	int drop = 1;
 	int32_t cbres = PPM_SUCCESS;
 	int cpu;
-	long table_index;
+	long table_index = -1;
 	int64_t retval;
 
 	if (tp_type < INTERNAL_EVENTS && !(consumer->tracepoints_attached & (1 << tp_type)))
@@ -1807,6 +1859,7 @@ static int record_event_consumer(struct ppm_consumer_t *consumer,
 	if (drop_event(consumer,
 		       event_type,
 		       drop_flags,
+		       table_index,
 		       ns,
 		       event_datap->event_info.syscall_data.regs))
 		return res;
diff --git a/driver/modern_bpf/helpers/base/maps_getters.h b/driver/modern_bpf/helpers/base/maps_getters.h
index db98869022f..ecf71673e0e 100644
--- a/driver/modern_bpf/helpers/base/maps_getters.h
+++ b/driver/modern_bpf/helpers/base/maps_getters.h
@@ -31,9 +31,9 @@ static __always_inline bool maps__get_dropping_mode()
 	return g_settings.dropping_mode;
 }
 
-static __always_inline uint32_t maps__get_sampling_ratio()
+static __always_inline uint32_t maps__get_sampling_ratio(u32 syscall_id)
 {
-	return g_settings.sampling_ratio;
+	return g_settings.sampling_ratio[syscall_id];
 }
 
 static __always_inline bool maps__get_drop_failed()
@@ -65,14 +65,14 @@ static __always_inline uint16_t maps__get_statsd_port()
 
 /*=============================== KERNEL CONFIGS ===========================*/
 
-static __always_inline bool maps__get_is_dropping()
+static __always_inline bool maps__get_is_dropping(u32 syscall_id)
 {
-	return is_dropping;
+	return is_dropping[syscall_id];
 }
 
-static __always_inline void maps__set_is_dropping(bool value)
+static __always_inline void maps__set_is_dropping(u32 syscall_id, bool value)
 {
-	is_dropping = value;
+	is_dropping[syscall_id] = value;
 }
 
 /*=============================== KERNEL CONFIGS ===========================*/
diff --git a/driver/modern_bpf/helpers/interfaces/attached_programs.h b/driver/modern_bpf/helpers/interfaces/attached_programs.h
index 88950eb70af..a1e21efec90 100644
--- a/driver/modern_bpf/helpers/interfaces/attached_programs.h
+++ b/driver/modern_bpf/helpers/interfaces/attached_programs.h
@@ -55,28 +55,33 @@ static __always_inline bool sampling_logic(void* ctx, u32 id, enum intrumentatio
 		return true;
 	}
 
-	if((bpf_ktime_get_boot_ns() % SECOND_TO_NS) >= (SECOND_TO_NS / maps__get_sampling_ratio()))
+	if (id < 0 || id >= SYSCALL_TABLE_SIZE)
+	{
+		return false;
+	}
+
+	if((bpf_ktime_get_boot_ns() % SECOND_TO_NS) >= (SECOND_TO_NS / maps__get_sampling_ratio(id)))
 	{
 		/* If we are starting the dropping phase we need to notify the userspace, otherwise, we
 		 * simply drop our event.
 		 * PLEASE NOTE: this logic is not per-CPU so it is best effort!
 		 */
-		if(!maps__get_is_dropping())
+		if(!maps__get_is_dropping(id))
 		{
 			/* Here we are not sure we can send the drop_e event to userspace
 			 * if the buffer is full, but this is not essential even if we lose
 			 * an iteration we will synchronize again the next time the logic is enabled.
 			 */
-			maps__set_is_dropping(true);
+			maps__set_is_dropping(id, true);
 			bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_DROP_E);
 			bpf_printk("unable to tail call into 'drop_e' prog");
 		}
 
 		return true;
 	}
 
-	if(maps__get_is_dropping())
+	if(maps__get_is_dropping(id))
 	{
-		maps__set_is_dropping(false);
+		maps__set_is_dropping(id, false);
 		bpf_tail_call(ctx, &extra_event_prog_tail_table, T1_DROP_X);
 		bpf_printk("unable to tail call into 'drop_x' prog");
 	}
diff --git a/driver/modern_bpf/maps/maps.h b/driver/modern_bpf/maps/maps.h
index 508a58fad49..37b0e470f3c 100644
--- a/driver/modern_bpf/maps/maps.h
+++ b/driver/modern_bpf/maps/maps.h
@@ -83,7 +83,7 @@ __weak struct capture_settings g_settings;
  * @brief Variable used only kernel side to understand when we need to send
  * `DROP_E` and `DROP_X` events
  */
-__weak bool is_dropping;
+__weak bool is_dropping[SYSCALL_TABLE_SIZE];
 
 /*=============================== BPF GLOBAL VARIABLES ===============================*/
 
diff --git a/driver/modern_bpf/programs/tail_called/events/custom_logic/drop.bpf.c b/driver/modern_bpf/programs/tail_called/events/custom_logic/drop.bpf.c
index 5fc3c975509..e595f89c5ef 100644
--- a/driver/modern_bpf/programs/tail_called/events/custom_logic/drop.bpf.c
+++ b/driver/modern_bpf/programs/tail_called/events/custom_logic/drop.bpf.c
@@ -22,7 +22,7 @@ int BPF_PROG(t1_drop_e)
 
 	/*=============================== COLLECT PARAMETERS ===========================*/
 
-	ringbuf__store_u32(&ringbuf, maps__get_sampling_ratio());
+	ringbuf__store_u32(&ringbuf, 0);
 
 	/*=============================== COLLECT PARAMETERS ===========================*/
 
@@ -47,7 +47,7 @@ int BPF_PROG(t1_drop_x)
 
 	/*=============================== COLLECT PARAMETERS ===========================*/
 
-	ringbuf__store_u32(&ringbuf, maps__get_sampling_ratio());
+	ringbuf__store_u32(&ringbuf, 0);
 
 	/*=============================== COLLECT PARAMETERS ===========================*/
 
diff --git a/driver/modern_bpf/shared_definitions/struct_definitions.h b/driver/modern_bpf/shared_definitions/struct_definitions.h
index e48c09bcde1..c196410912d 100644
--- a/driver/modern_bpf/shared_definitions/struct_definitions.h
+++ b/driver/modern_bpf/shared_definitions/struct_definitions.h
@@ -17,6 +17,7 @@
  */
 #define AUXILIARY_MAP_SIZE 128 * 1024
 
+#define SYSCALL_TABLE_SIZE 512
 /**
  * @brief General settings shared among all the CPUs.
  *
@@ -26,7 +27,7 @@ struct capture_settings
 	uint64_t boot_time;      /* boot time. */
 	uint32_t snaplen;        /* we use it when we want to read a maximum size from an event and no more. */
 	bool dropping_mode;      /* this flag actives the sampling logic */
-	uint32_t sampling_ratio; /* this config tells tracepoints when they have to drop events */
+	uint32_t sampling_ratio[SYSCALL_TABLE_SIZE]; /* this config tells tracepoints when they have to drop events */
 	bool drop_failed;        /* whether to drop failed syscalls (exit events) */
 	bool do_dynamic_snaplen; /* enforce snaplen according to the event content */
 	uint16_t fullcapture_port_range_start; /* first interesting port */
diff --git a/driver/ppm_consumer.h b/driver/ppm_consumer.h
index 70801bce7d1..eedb3bd9625 100644
--- a/driver/ppm_consumer.h
+++ b/driver/ppm_consumer.h
@@ -21,10 +21,10 @@ struct ppm_consumer_t {
 	struct ppm_ring_buffer_context *ring_buffers;
#endif
 	u32 snaplen;
-	u32 sampling_ratio;
+	uint16_t sampling_ratio[SYSCALL_TABLE_SIZE];
 	bool do_dynamic_snaplen;
-	u32 sampling_interval;
-	int is_dropping;
+	u32 sampling_interval[SYSCALL_TABLE_SIZE];
+	bool is_dropping[SYSCALL_TABLE_SIZE];
 	int dropping_mode;
 	bool drop_failed;
 	volatile int need_to_insert_drop_e;
diff --git a/driver/ppm_events_public.h b/driver/ppm_events_public.h
index 1b5d75ed35e..674c38ed0f6 100644
--- a/driver/ppm_events_public.h
+++ b/driver/ppm_events_public.h
@@ -2076,6 +2076,7 @@ struct ppm_evt_hdr {
 #define PPM_IOCTL_DISABLE_TP _IO(PPM_IOCTL_MAGIC, 32)
 #define PPM_IOCTL_ENABLE_DROPFAILED _IO(PPM_IOCTL_MAGIC, 33)
 #define PPM_IOCTL_DISABLE_DROPFAILED _IO(PPM_IOCTL_MAGIC, 34)
+#define PPM_IOCTL_SET_DROPPING_RATIO _IO(PPM_IOCTL_MAGIC, 35)
 #endif // CYGWING_AGENT
 
 extern const struct ppm_name_value socket_families[];
diff --git a/driver/ppm_fillers.c b/driver/ppm_fillers.c
index 967f28cdd1f..2e1dd59fe8f 100644
--- a/driver/ppm_fillers.c
+++ b/driver/ppm_fillers.c
@@ -4525,7 +4525,7 @@ int f_sched_drop(struct event_filler_arguments *args)
 	/*
 	 * ratio
 	 */
-	res = val_to_ring(args, args->consumer->sampling_ratio, 0, false, 0);
+	res = val_to_ring(args, 0, 0, false, 0);
 	if (unlikely(res != PPM_SUCCESS))
 		return res;
 
diff --git a/userspace/libpman/include/libpman.h b/userspace/libpman/include/libpman.h
index 4d382875f95..2c20abc1c41 100644
--- a/userspace/libpman/include/libpman.h
+++ b/userspace/libpman/include/libpman.h
@@ -337,7 +337,20 @@ extern "C"
 	 */
 	void pman_set_dropping_mode(bool value);
 
-	void pman_set_sampling_ratio(uint32_t value);
+	/**
+	 * @brief Set the sampling ratio for all syscalls.
+	 *
+	 * @param value sampling ratio.
+	 */
+	void pman_set_default_sampling_ratio(uint32_t value);
+
+	/**
+	 * @brief Set the sampling ratio for a single syscall.
+	 *
+	 * @param syscall_id syscall to set.
+	 * @param value sampling ratio.
+	 */
+	int pman_set_sampling_ratio(uint32_t syscall_id, uint32_t value);
 
 	/**
 	 * @brief Ask driver to drop failed syscalls.
diff --git a/userspace/libpman/src/maps.c b/userspace/libpman/src/maps.c
index d71a8785980..64d6d99524f 100644
--- a/userspace/libpman/src/maps.c
+++ b/userspace/libpman/src/maps.c
@@ -77,9 +77,12 @@ void pman_set_dropping_mode(bool value)
 	g_state.skel->bss->g_settings.dropping_mode = value;
 }
 
-void pman_set_sampling_ratio(uint32_t value)
+void pman_set_default_sampling_ratio(uint32_t value)
 {
-	g_state.skel->bss->g_settings.sampling_ratio = value;
+	for (int i = 0; i < SYSCALL_TABLE_SIZE; i++)
+	{
+		g_state.skel->bss->g_settings.sampling_ratio[i] = value;
+	}
 }
 
 void pman_set_drop_failed(bool drop_failed)
@@ -108,6 +111,18 @@ void pman_mark_single_64bit_syscall(int intersting_syscall_id, bool interesting)
 	g_state.skel->bss->g_64bit_interesting_syscalls_table[intersting_syscall_id] = interesting;
 }
 
+int pman_set_sampling_ratio(uint32_t syscall_id, uint32_t value)
+{
+	if(g_syscall_table[syscall_id].flags & (UF_NEVER_DROP | UF_ALWAYS_DROP)
+	   || g_syscall_table[syscall_id].flags == UF_NONE
+	   || !(g_syscall_table[syscall_id].flags & UF_USED))
+	{
+		return 1;
+	}
+	g_state.skel->bss->g_settings.sampling_ratio[syscall_id] = value;
+	return 0;
+}
+
 void pman_fill_syscall_sampling_table()
 {
 	for(int syscall_id = 0; syscall_id < SYSCALL_TABLE_SIZE; syscall_id++)
 	{
@@ -348,7 +363,7 @@ int pman_finalize_maps_after_loading()
 	/* set bpf global variables. */
 	pman_set_snaplen(80);
 	pman_set_dropping_mode(false);
-	pman_set_sampling_ratio(1);
+	pman_set_default_sampling_ratio(1);
 	pman_set_drop_failed(false);
 	pman_set_do_dynamic_snaplen(false);
 	pman_set_fullcapture_port_range(0, 0);
diff --git a/userspace/libscap/engine/bpf/scap_bpf.c b/userspace/libscap/engine/bpf/scap_bpf.c
index f0396a9ad38..90eb170b7fa 100644
--- a/userspace/libscap/engine/bpf/scap_bpf.c
+++ b/userspace/libscap/engine/bpf/scap_bpf.c
@@ -1243,8 +1243,10 @@ int32_t scap_bpf_start_dropping_mode(struct scap_engine_handle engine, uint32_t
 	{
 		return scap_errprintf(handle->m_lasterr, -ret, "SCAP_SETTINGS_MAP bpf_map_lookup_elem");
 	}
-
-	settings.sampling_ratio = sampling_ratio;
+	for (int i = 0; i < SYSCALL_TABLE_SIZE; i++)
+	{
+		settings.sampling_ratio[i] = sampling_ratio;
+	}
 	settings.dropping_mode = true;
 	if((ret = bpf_map_update_elem(handle->m_bpf_map_fds[SCAP_SETTINGS_MAP], &k, &settings, BPF_ANY)) != 0)
 	{
@@ -1265,8 +1267,11 @@ int32_t scap_bpf_stop_dropping_mode(struct scap_engine_handle engine)
 	{
 		return scap_errprintf(handle->m_lasterr, -ret, "SCAP_SETTINGS_MAP bpf_map_lookup_elem");
 	}
+	for (int i = 0; i < SYSCALL_TABLE_SIZE; i++)
+	{
+		settings.sampling_ratio[i] = 1;
+	}
 
-	settings.sampling_ratio = 1;
 	settings.dropping_mode = false;
 	if((ret = bpf_map_update_elem(handle->m_bpf_map_fds[SCAP_SETTINGS_MAP], &k, &settings, BPF_ANY)) != 0)
 	{
@@ -1439,16 +1444,19 @@ static int32_t set_default_settings(struct bpf_engine *handle)
 	settings.boot_time = boot_time;
 	settings.socket_file_ops = NULL;
 	settings.snaplen = SNAPLEN;
-	settings.sampling_ratio = 1;
 	settings.do_dynamic_snaplen = false;
 	settings.dropping_mode = false;
-	settings.is_dropping = false;
 	settings.drop_failed = false;
 	settings.tracers_enabled = false;
 	settings.fullcapture_port_range_start = 0;
 	settings.fullcapture_port_range_end = 0;
 	settings.statsd_port = PPM_PORT_STATSD;
+	for (int i = 0; i < SYSCALL_TABLE_SIZE; i++)
+	{
+		settings.sampling_ratio[i] = 1;
+		settings.is_dropping[i] = false;
+	}
 
 	int k = 0;
 	int ret;
@@ -1836,6 +1844,37 @@ static int32_t next(struct scap_engine_handle engine, OUT scap_evt** pevent, OUT
 	return ringbuffer_next(&engine.m_handle->m_dev_set, pevent, pcpuid);
 }
 
+static int32_t scap_bpf_set_drop_ratio(struct scap_engine_handle engine, uint32_t ratio, uint32_t sc)
+{
+	int ret;
+	int syscall_id = scap_ppm_sc_to_native_id(sc);
+	if(syscall_id == -1)
+	{
+		return SCAP_FAILURE;
+	}
+	struct bpf_engine* handle = engine.m_handle;
+	struct syscall_evt_pair sc_evt;
+	if ((ret = bpf_map_lookup_elem(handle->m_bpf_map_fds[SCAP_SYSCALL_TABLE], &syscall_id, &sc_evt)) != 0)
+	{
+		return scap_errprintf(handle->m_lasterr, -ret, "SCAP_SYSCALL_TABLE bpf_map_lookup_elem: %d", syscall_id);
+	}
+	if ((sc_evt.flags & (UF_NEVER_DROP | UF_ALWAYS_DROP))) {
+		return SCAP_FAILURE;
+	}
+	int k = 0;
+	struct scap_bpf_settings settings;
+	if((ret = bpf_map_lookup_elem(handle->m_bpf_map_fds[SCAP_SETTINGS_MAP], &k, &settings)) != 0)
+	{
+		return scap_errprintf(handle->m_lasterr, -ret, "SCAP_SETTINGS_MAP bpf_map_lookup_elem");
+	}
+	settings.sampling_ratio[syscall_id] = ratio;
+	if((ret = bpf_map_update_elem(handle->m_bpf_map_fds[SCAP_SETTINGS_MAP], &k, &settings, BPF_ANY)) != 0)
+	{
+		return scap_errprintf(handle->m_lasterr, -ret, "SCAP_SETTINGS_MAP bpf_map_update_elem");
+	}
+	return SCAP_SUCCESS;
+}
+
 static int32_t unsupported_config(struct scap_engine_handle engine, const char* msg)
 {
 	struct bpf_engine* handle = engine.m_handle;
@@ -1917,6 +1956,8 @@ static int32_t configure(struct scap_engine_handle engine, enum scap_setting set
 		return scap_bpf_set_fullcapture_port_range(engine, arg1, arg2);
 	case SCAP_STATSD_PORT:
 		return scap_bpf_set_statsd_port(engine, arg1);
+	case SCAP_DROPPING_RATIO:
+		return scap_bpf_set_drop_ratio(engine, arg1, arg2);
 	default:
 	{
 		char msg[SCAP_LASTERR_SIZE];
diff --git a/userspace/libscap/engine/kmod/scap_kmod.c b/userspace/libscap/engine/kmod/scap_kmod.c
index 47fc113280e..3fcfe3f5a40 100644
--- a/userspace/libscap/engine/kmod/scap_kmod.c
+++ b/userspace/libscap/engine/kmod/scap_kmod.c
@@ -175,6 +175,19 @@ static int32_t mark_syscall(struct kmod_engine* handle, uint32_t ioctl_op, int s
 	return SCAP_SUCCESS;
 }
 
+static int32_t set_dropping_ratio(struct kmod_engine* handle, int syscall_id, int ratio)
+{
+	struct scap_device_set *devset = &handle->m_dev_set;
+	if(ioctl(devset->m_devs[0].m_fd, PPM_IOCTL_SET_DROPPING_RATIO, ((uint64_t)syscall_id << 32) + ratio))
+	{
+		ASSERT(false);
+		return scap_errprintf(handle->m_lasterr, errno,
+				      "%s(%d) failed for syscall %d(%d)",
+				      __FUNCTION__, PPM_IOCTL_SET_DROPPING_RATIO, syscall_id, ratio);
+	}
+	return SCAP_SUCCESS;
+}
+
 static int enforce_sc_set(struct kmod_engine* handle)
 {
 	/* handle->capturing == false means that we want to disable the capture */
@@ -835,6 +848,17 @@ int32_t scap_kmod_set_statsd_port(struct scap_engine_handle engine, const uint16
 	return SCAP_SUCCESS;
 }
 
+static int32_t scap_kmod_set_dropping_ratio(struct scap_engine_handle engine, uint32_t ratio, uint32_t sc)
+{
+	struct kmod_engine* handle = engine.m_handle;
+	int syscall_id = scap_ppm_sc_to_native_id(sc);
+	if(syscall_id == -1)
+	{
+		return SCAP_FAILURE;
+	}
+	return set_dropping_ratio(handle, syscall_id, ratio);
+}
+
 static int32_t unsupported_config(struct scap_engine_handle engine, const char* msg)
 {
 	struct kmod_engine* handle = engine.m_handle;
@@ -874,6 +898,8 @@ static int32_t configure(struct scap_engine_handle engine, enum scap_setting set
 		return scap_kmod_set_fullcapture_port_range(engine, arg1, arg2);
 	case SCAP_STATSD_PORT:
 		return scap_kmod_set_statsd_port(engine, arg1);
+	case SCAP_DROPPING_RATIO:
+		return scap_kmod_set_dropping_ratio(engine, arg1, arg2);
 	default:
 	{
 		char msg[256];
diff --git a/userspace/libscap/engine/modern_bpf/scap_modern_bpf.c b/userspace/libscap/engine/modern_bpf/scap_modern_bpf.c
index 797d867ec0c..bb973887517 100644
--- a/userspace/libscap/engine/modern_bpf/scap_modern_bpf.c
+++ b/userspace/libscap/engine/modern_bpf/scap_modern_bpf.c
@@ -68,14 +68,14 @@ static int32_t scap_modern_bpf__next(struct scap_engine_handle engine, OUT scap_
 
 static int32_t scap_modern_bpf_start_dropping_mode(struct scap_engine_handle engine, uint32_t sampling_ratio)
 {
-	pman_set_sampling_ratio(sampling_ratio);
+	pman_set_default_sampling_ratio(sampling_ratio);
 	pman_set_dropping_mode(true);
 	return SCAP_SUCCESS;
 }
 
 int32_t scap_modern_bpf_stop_dropping_mode()
 {
-	pman_set_sampling_ratio(1);
+	pman_set_default_sampling_ratio(1);
 	pman_set_dropping_mode(false);
 	return SCAP_SUCCESS;
 }
@@ -92,6 +92,16 @@ static int32_t scap_modern_bpf_handle_sc(struct scap_engine_handle engine, uint3
 	return SCAP_SUCCESS;
 }
 
+static int32_t scap_modern_bpf_dropping_ratio(struct scap_engine_handle engine, uint32_t ratio, uint32_t sc)
+{
+	int syscall_id = scap_ppm_sc_to_native_id(sc);
+	if(syscall_id == -1)
+	{
+		return SCAP_FAILURE;
+	}
+	return pman_set_sampling_ratio(syscall_id, ratio) == 0 ? SCAP_SUCCESS : SCAP_FAILURE;
+}
+
 static int32_t scap_modern_bpf__configure(struct scap_engine_handle engine, enum scap_setting setting, unsigned long arg1, unsigned long arg2)
 {
 	switch(setting)
 	{
@@ -125,6 +135,8 @@ static int32_t scap_modern_bpf__configure(struct scap_engine_handle engine, enum
 	case SCAP_STATSD_PORT:
 		pman_set_statsd_port(arg1);
 		break;
+	case SCAP_DROPPING_RATIO:
+		return scap_modern_bpf_dropping_ratio(engine, arg1, arg2);
 	default:
 	{
 		char msg[SCAP_LASTERR_SIZE];
diff --git a/userspace/libscap/scap.c b/userspace/libscap/scap.c
index 6864cb3f069..a478759e8ca 100644
--- a/userspace/libscap/scap.c
+++ b/userspace/libscap/scap.c
@@ -719,3 +719,15 @@ uint64_t scap_get_driver_schema_version(scap_t* handle)
 
 	return 0;
 }
+
+int32_t scap_set_dropping_ratio(scap_t* handle, ppm_sc_code ppm_sc, uint32_t sampling_ratio)
+{
+	if(handle->m_vtable)
+	{
+		return handle->m_vtable->configure(handle->m_engine, SCAP_DROPPING_RATIO, sampling_ratio, ppm_sc);
+	}
+
+	snprintf(handle->m_lasterr, SCAP_LASTERR_SIZE, "operation not supported");
+	ASSERT(false);
+	return SCAP_FAILURE;
+}
diff --git a/userspace/libscap/scap.h b/userspace/libscap/scap.h
index 4ae1eb7d7d8..fa0e48d6c0a 100644
--- a/userspace/libscap/scap.h
+++ b/userspace/libscap/scap.h
@@ -913,6 +913,15 @@ uint64_t scap_get_driver_api_version(scap_t* handle);
 */
 uint64_t scap_get_driver_schema_version(scap_t* handle);
 
+/*!
+  \brief Set the dropping-mode sampling ratio for a single syscall.
+
+  \param handle Handle to the capture instance.
+  \param ppm_sc the ppm_sc code of the syscall to configure (e.g. PPM_SC_CHOWN).
+  \param sampling_ratio sampling ratio to apply to that syscall.
+*/
+int32_t scap_set_dropping_ratio(scap_t* handle, ppm_sc_code ppm_sc, uint32_t sampling_ratio);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/userspace/libscap/scap_vtable.h b/userspace/libscap/scap_vtable.h
index 086d87851df..5b2965cce74 100644
--- a/userspace/libscap/scap_vtable.h
+++ b/userspace/libscap/scap_vtable.h
@@ -85,6 +85,13 @@ enum scap_setting {
 	 * arg1: whether to enabled or disable the feature
 	 */
 	SCAP_DROP_FAILED,
+
+	/**
+	 * @brief set the dropping-mode sampling ratio for a single syscall
+	 * arg1: sampling ratio to apply to the syscall
+	 * arg2: the ppm_sc code of the syscall (e.g. PPM_SC_CHOWN)
+	 */
+	SCAP_DROPPING_RATIO,
 };
 
 struct scap_savefile_vtable {
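
For context, and not part of the patch itself: a minimal sketch of how a libscap consumer might exercise the new per-syscall ratio through the public API added above. Only scap_set_dropping_ratio(), SCAP_SUCCESS, and PPM_SC_CHOWN come from this patch; the helper function name, the "scap.h" include path, and the calls to scap_getlasterr() and scap_start_dropping_mode() are assumptions based on the pre-existing libscap interface.

    /* Illustrative sketch only: assumes `h` was already opened against one of
     * the live engines (kmod, bpf, or modern_bpf). */
    #include <stdio.h>
    #include "scap.h"

    static int enable_chown_sampling(scap_t *h)
    {
    	/* Per this patch, the kmod driver only accepts power-of-two ratios in
    	 * [1, 128], and syscalls flagged UF_NEVER_DROP or UF_ALWAYS_DROP
    	 * cannot be configured at all. */
    	if(scap_set_dropping_ratio(h, PPM_SC_CHOWN, 8) != SCAP_SUCCESS)
    	{
    		fprintf(stderr, "scap_set_dropping_ratio failed: %s\n", scap_getlasterr(h));
    		return -1;
    	}

    	/* The per-syscall ratio only has an effect while dropping mode is
    	 * enabled, e.g. after scap_start_dropping_mode() has been called. */
    	return 0;
    }

On the kmod path the two values travel in a single unsigned long ioctl argument: the native syscall id in the upper 32 bits and the ratio in the lower 32 bits, exactly as packed by set_dropping_ratio() and unpacked by the PPM_IOCTL_SET_DROPPING_RATIO handler in driver/main.c.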