diff --git a/.github/workflows/test-eunomia.yaml b/.github/workflows/test-eunomia.yaml index 7cf0cbf..7a38289 100644 --- a/.github/workflows/test-eunomia.yaml +++ b/.github/workflows/test-eunomia.yaml @@ -32,7 +32,35 @@ jobs: run: | ./ecc src/2-kprobe-unlink/kprobe-link.bpf.c sudo timeout -s 2 3 ./ecli run src/2-kprobe-unlink/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 4 opensnoop + run: | + ./ecc src/4-opensnoop/opensnoop.bpf.c + sudo timeout -s 2 3 ./ecli run src/4-opensnoop/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 5 bashreadline + run: | + ./ecc src/5-uprobe-bashreadline/bashreadline.bpf.c + sudo timeout -s 2 3 ./ecli run src/5-uprobe-bashreadline/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 6 sigsnoop + run: | + ./ecc src/6-sigsnoop/sigsnoop.bpf.c + sudo timeout -s 2 3 ./ecli run src/6-sigsnoop/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi - name: test 7 execsnoop run: | ./ecc src/7-execsnoop/execsnoop.bpf.c src/7-execsnoop/execsnoop.h - sudo timeout -s 2 3 ./ecli run src/7-execsnoop/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi \ No newline at end of file + sudo timeout -s 2 3 ./ecli run src/7-execsnoop/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 8 exitsnoop + run: | + ./ecc src/8-exitsnoop/exitsnoop.bpf.c src/8-exitsnoop/exitsnoop.h + sudo timeout -s 2 3 ./ecli run src/8-exitsnoop/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 9 runqlat + run: | + ./ecc src/9-runqlat/runqlat.bpf.c src/9-runqlat/runqlat.h + sudo timeout -s 2 3 ./ecli run src/9-runqlat/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 10 hardirqs + run: | + ./ecc src/10-hardirqs/hardirqs.bpf.c src/10-hardirqs/hardirqs.h + sudo timeout -s 2 3 ./ecli run src/10-hardirqs/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 20 tc + run: | + ./ecc src/20-tc/tc.bpf.c + sudo timeout -s 2 3 ./ecli run src/20-tc/package.json || if [ $? = 124 ]; then exit 0; else exit $?; fi diff --git a/.github/workflows/test-libbpf.yml b/.github/workflows/test-libbpf.yml index dd0370f..0fd5ffb 100644 --- a/.github/workflows/test-libbpf.yml +++ b/.github/workflows/test-libbpf.yml @@ -27,3 +27,19 @@ jobs: run: | make -C src/12-profile # sudo timeout -s 2 3 src/12-profile/profile || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 13 tcpconnlat + run: | + make -C src/13-tcpconnlat + # sudo timeout -s 2 3 src/13-tcpconnlat/tcpconnlat || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 14 tcpstates + run: | + make -C src/14-tcpstates + # sudo timeout -s 2 3 src/14-tcpstates/tcpstates || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 16 memleak + run: | + make -C src/16-memleak + # sudo timeout -s 2 3 src/16-memleak/memleak || if [ $? = 124 ]; then exit 0; else exit $?; fi + - name: test 17 biopattern + run: | + make -C src/17-biopattern + # sudo timeout -s 2 3 src/17-biopattern/biopattern || if [ $? 
= 124 ]; then exit 0; else exit $?; fi diff --git a/src/14-tcpstates/README_en.md b/src/14-tcpstates/README_en.md index 12346b0..2ef206f 100644 --- a/src/14-tcpstates/README_en.md +++ b/src/14-tcpstates/README_en.md @@ -25,7 +25,7 @@ In the above output, the most time is spent in the ESTABLISHED state, which indi In our upcoming tutorials, we will delve deeper into these two tools, explaining their implementation principles, and hopefully, these contents will help you in your work with eBPF for network and performance analysis. -## tcpstate +## tcpstate eBPF code Due to space constraints, here we mainly discuss and analyze the corresponding eBPF kernel-mode code implementation. The following is the eBPF code for tcpstate: @@ -108,7 +108,8 @@ int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx) } else { /* family == AF_INET6 */ bpf_probe_read_kernel(&event.saddr, sizeof(event.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32); bpf_probe_read_kernel(&event.daddr, sizeof(event.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32); - }.`bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event)); + } + bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event)); if (ctx->newstate == TCP_CLOSE) bpf_map_delete_elem(×tamps, &sk); @@ -135,7 +136,7 @@ In the `handle_set_state` function, it first determines whether the current TCP Finally, based on the new state of the TCP connection, the program performs different operations: if the new state is TCP_CLOSE, it means the connection has been closed and the program deletes the timestamp of that connection from the `timestamps` map; otherwise, the program updates the timestamp of the connection. -## User-Space Processing +## User-Space Processing for tcpstate The user-space part is mainly about loading the eBPF program using libbpf and receiving event data from the kernel using perf_event: @@ -184,7 +185,7 @@ In summary, the user-space part of the processing involves the following steps: The above is the main implementation logic of the user-space part of the `tcpstates` program. Through this chapter, you should have gained a deeper understanding of how to handle kernel events in user space. In the next chapter, we will introduce more knowledge about using eBPF for network monitoring. -### tcprtt +### tcprtt kernel eBPF code In this section, we will analyze the kernel BPF code of the `tcprtt` eBPF program. `tcprtt` is a program used to measure TCP Round Trip Time (RTT) and stores the RTT information in a histogram. @@ -233,16 +234,7 @@ int BPF_PROG(tcp_rcv, struct sock *sk) slot = log2l(srtt); if (slot >= MAX_SLOTS) slot = MAX_SLOTS - 1; -``` - -The code above declares a map called `hists`, which is a hash map used to store the histogram data. The `hists` map has a maximum number of entries defined as `MAX_ENTRIES`. - -The function `BPF_PROG(tcp_rcv, struct sock *sk)` is the entry point of the eBPF program for handling the `tcp_rcv_established` event. Within this function, the program retrieves various information from the network socket and checks if filtering conditions are met. Then, it performs operations on the histogram data structure. Finally, the program calculates the slot for the RTT value and updates the histogram accordingly. - -This is the main code logic of the `tcprtt` eBPF program in kernel mode. 
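For readers following the tcprtt discussion above, the slot calculation and map update can be hard to picture from patch fragments alone. Below is a compact, self-contained sketch of the same pattern (a hash map of `hist` structs, log2 bucketing, clamping to `MAX_SLOTS`, and an atomic increment). It is illustrative only: the `MAX_SLOTS`/`MAX_ENTRIES` values, the local `log2l` helper, and the zero-initialization idiom are assumptions modeled on the description, not the tutorial's exact source, and the address/port filtering the text mentions is omitted for brevity.

```c
/* Sketch of the tcprtt-style histogram update (assumptions noted above). */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

#define MAX_SLOTS   27      /* assumed histogram width */
#define MAX_ENTRIES 10240   /* assumed map capacity */

struct hist {
	__u64 slots[MAX_SLOTS];
};

static struct hist zero_hist; /* zero value used to initialize new entries */

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u64);
	__type(value, struct hist);
} hists SEC(".maps");

/* integer log2 used for latency bucketing; assumes bounded-loop support */
static __always_inline __u64 log2l(__u64 v)
{
	__u64 r = 0;

	while (v >>= 1)
		r++;
	return r;
}

SEC("fentry/tcp_rcv_established")
int BPF_PROG(tcp_rcv, struct sock *sk)
{
	__u64 key = 0; /* 0 = aggregate; could be saddr/daddr for per-address histograms */
	struct tcp_sock *ts = (struct tcp_sock *)sk;
	__u64 srtt, slot;
	struct hist *histp;

	/* look up the histogram for this key, or create it on first use */
	histp = bpf_map_lookup_elem(&hists, &key);
	if (!histp) {
		bpf_map_update_elem(&hists, &key, &zero_hist, BPF_NOEXIST);
		histp = bpf_map_lookup_elem(&hists, &key);
		if (!histp)
			return 0;
	}

	/* smoothed RTT is kept as usec << 3 in struct tcp_sock */
	srtt = BPF_CORE_READ(ts, srtt_us) >> 3;

	/* bucket on a log2 scale and clamp to the last slot */
	slot = log2l(srtt);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```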
The eBPF program measures the RTT of TCP connections and maintains a histogram to collect and analyze the RTT data.Instructions: - -```c -__sync_fetch_and_add(&histp->slots[slot], 1); + __sync_fetch_and_add(&histp->slots[slot], 1); if (targ_show_ext) { __sync_fetch_and_add(&histp->latency, srtt); __sync_fetch_and_add(&histp->cnt, 1); @@ -251,6 +243,12 @@ __sync_fetch_and_add(&histp->slots[slot], 1); } ``` +The code above declares a map called `hists`, which is a hash map used to store the histogram data. The `hists` map has a maximum number of entries defined as `MAX_ENTRIES`. + +The function `BPF_PROG(tcp_rcv, struct sock *sk)` is the entry point of the eBPF program for handling the `tcp_rcv_established` event. Within this function, the program retrieves various information from the network socket and checks if filtering conditions are met. Then, it performs operations on the histogram data structure. Finally, the program calculates the slot for the RTT value and updates the histogram accordingly. + +This is the main code logic of the `tcprtt` eBPF program in kernel mode. The eBPF program measures the RTT of TCP connections and maintains a histogram to collect and analyze the RTT data. + First, we define a hash type eBPF map called `hists`, which is used to store statistics information about RTT. In this map, the key is a 64-bit integer, and the value is a `hist` structure that contains an array to store the count of different RTT intervals. Next, we define an eBPF program called `tcp_rcv` which will be called every time a TCP packet is received in the kernel. In this program, we first filter TCP connections based on filtering conditions (source/destination IP address and port). If the conditions are met, we select the corresponding key (source IP, destination IP, or 0) based on the set parameters, and then look up or initialize the corresponding histogram in the `hists` map. diff --git a/src/15-javagc/README_en.md b/src/15-javagc/README_en.md index 742e974..9a35af8 100644 --- a/src/15-javagc/README_en.md +++ b/src/15-javagc/README_en.md @@ -20,7 +20,7 @@ Tracing uprobe cmd (p:cmd /opt/bin/mysqld:0x2dbd40 +0(%dx):string). Ctrl-C to en [...] ``` -Here, we use the `uprobe `tool, which leverages Linux's built-in functionalities: ftrace (tracing framework) and uprobes (User-Level Dynamic Tracing, requires a relatively new Linux version, around 4.0 or later). Other tracing frameworks such as perf_events and SystemTap can also achieve this functionality. +Here, we use the `uprobe` tool, which leverages Linux's built-in functionalities: ftrace (tracing framework) and uprobes (User-Level Dynamic Tracing, requires a relatively new Linux version, around 4.0 or later). Other tracing frameworks such as perf_events and SystemTap can also achieve this functionality. Many other MySQL functions can be traced to obtain more information. We can list and count the number of these functions: @@ -314,4 +314,4 @@ Summary.Through this introductory eBPF tutorial, we have learned how to use eBPF Additionally, we have also introduced some basic knowledge and practical techniques related to Java GC, USDT, and eBPF. This knowledge and skills are valuable for developers who want to delve into the field of network and system performance analysis. -If you would like to learn more about eBPF knowledge and practices, you can visit our tutorial code repository at or website to get more examples and the complete tutorial.
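As a side note to the `uprobe` example above: the same probe point can be attached from a libbpf-based loader instead of the ftrace `uprobe_events` interface. The snippet below is a hypothetical user-space sketch, not part of the tutorial: the object file name `mysqld_trace.bpf.o` and program name `trace_dispatch` are invented for illustration, while the binary path and the 0x2dbd40 offset are taken from the example output above.

```c
/* Hypothetical loader: attach a BPF program as a uprobe at a raw offset. */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("mysqld_trace.bpf.o", NULL); /* assumed object */
	if (!obj || bpf_object__load(obj)) {
		fprintf(stderr, "failed to open/load BPF object\n");
		return 1;
	}

	prog = bpf_object__find_program_by_name(obj, "trace_dispatch"); /* assumed name */
	if (!prog) {
		fprintf(stderr, "program not found\n");
		return 1;
	}

	/* equivalent of the ftrace probe: /opt/bin/mysqld at offset 0x2dbd40 */
	link = bpf_program__attach_uprobe(prog, false /* not a retprobe */,
					  -1 /* any process */,
					  "/opt/bin/mysqld", 0x2dbd40);
	if (!link) {
		fprintf(stderr, "failed to attach uprobe\n");
		return 1;
	}

	/* a real tool would poll a ring or perf buffer here */
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}
```

Reading the command string itself would be done on the BPF side, for example with `bpf_probe_read_user_str()`, mirroring the `+0(%dx):string` fetch argument shown in the ftrace probe above.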
\ No newline at end of file +If you would like to learn more about eBPF knowledge and practices, you can visit our tutorial code repository at or website to get more examples and the complete tutorial. diff --git a/src/16-memleak/Makefile b/src/16-memleak/Makefile index 84ead7e..8080259 100644 --- a/src/16-memleak/Makefile +++ b/src/16-memleak/Makefile @@ -24,13 +24,13 @@ INCLUDES := -I$(OUTPUT) -I../../libbpf/include/uapi -I$(dir $(VMLINUX)) CFLAGS := -g -Wall ALL_LDFLAGS := $(LDFLAGS) $(EXTRA_LDFLAGS) -APPS = memleak # minimal minimal_legacy uprobe kprobe fentry usdt sockfilter tc ksyscall +APPS = # minimal minimal_legacy bootstrap uprobe kprobe fentry usdt sockfilter tc ksyscall CARGO ?= $(shell which cargo) ifeq ($(strip $(CARGO)),) BZS_APPS := else -BZS_APPS := # profile +BZS_APPS := memleak APPS += $(BZS_APPS) # Required by libblazesym ALL_LDFLAGS += -lrt -ldl -lpthread -lm diff --git a/src/16-memleak/README_en.md b/src/16-memleak/README_en.md index c953784..a70c84b 100644 --- a/src/16-memleak/README_en.md +++ b/src/16-memleak/README_en.md @@ -442,4 +442,3 @@ Through this eBPF introductory tutorial, you have learned how to write a Memleak You can visit our tutorial code repository at or website for more examples and complete tutorials. -The next tutorial will further explore advanced features of eBPF, and we will continue to share more content related to eBPF development practices. We hope that this knowledge and skills will help you better understand and use eBPF to solve problems encountered in practical work. diff --git a/src/16-memleak/memleak.bpf.c b/src/16-memleak/memleak.bpf.c index aa213c8..c50e6e8 100644 --- a/src/16-memleak/memleak.bpf.c +++ b/src/16-memleak/memleak.bpf.c @@ -302,26 +302,101 @@ int BPF_KRETPROBE(pvalloc_exit) return gen_alloc_exit(ctx); } -SEC("tracepoint/kmem/kmalloc") -int memleak__kmalloc(struct trace_event_raw_kmem_alloc *ctx) +/** + * commit 11e9734bcb6a("mm/slab_common: unify NUMA and UMA version of + * tracepoints") drops kmem_alloc event class, rename kmem_alloc_node to + * kmem_alloc, so `trace_event_raw_kmem_alloc_node` is not existed any more. + * see: + * https://github.com/torvalds/linux/commit/11e9734bcb6a + */ +struct trace_event_raw_kmem_alloc_node___x { + const void *ptr; + size_t bytes_alloc; +} __attribute__((preserve_access_index)); + +static __always_inline bool has_kmem_alloc_node(void) { + if (bpf_core_type_exists(struct trace_event_raw_kmem_alloc_node___x)) + return true; + return false; +} + + +/** + * commit 2c1d697fb8ba("mm/slab_common: drop kmem_alloc & avoid dereferencing + * fields when not using") drops kmem_alloc event class. As a result, + * `trace_event_raw_kmem_alloc` is removed, `trace_event_raw_kmalloc` and + * `trace_event_raw_kmem_cache_alloc` are added. 
+ * see: + * https://github.com/torvalds/linux/commit/2c1d697fb8ba + */ +struct trace_event_raw_kmem_alloc___x { + const void *ptr; + size_t bytes_alloc; +} __attribute__((preserve_access_index)); + +struct trace_event_raw_kmalloc___x { + const void *ptr; + size_t bytes_alloc; +} __attribute__((preserve_access_index)); + +struct trace_event_raw_kmem_cache_alloc___x { + const void *ptr; + size_t bytes_alloc; +} __attribute__((preserve_access_index)); + +static __always_inline bool has_kmem_alloc(void) +{ + if (bpf_core_type_exists(struct trace_event_raw_kmem_alloc___x)) + return true; + return false; +} + +SEC("tracepoint/kmem/kmalloc") +int memleak__kmalloc(void *ctx) +{ + const void *ptr; + size_t bytes_alloc; + + if (has_kmem_alloc()) { + struct trace_event_raw_kmem_alloc___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); + } else { + struct trace_event_raw_kmalloc___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); + } + if (wa_missing_free) - gen_free_enter(ctx->ptr); + gen_free_enter(ptr); - gen_alloc_enter(ctx->bytes_alloc); + gen_alloc_enter(bytes_alloc); - return gen_alloc_exit2(ctx, (u64)(ctx->ptr)); + return gen_alloc_exit2(ctx, (u64)ptr); } SEC("tracepoint/kmem/kmalloc_node") -int memleak__kmalloc_node(struct trace_event_raw_kmem_alloc_node *ctx) +int memleak__kmalloc_node(void *ctx) { - if (wa_missing_free) - gen_free_enter(ctx->ptr); + const void *ptr; + size_t bytes_alloc; - gen_alloc_enter(ctx->bytes_alloc); + if (has_kmem_alloc_node()) { + struct trace_event_raw_kmem_alloc_node___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); - return gen_alloc_exit2(ctx, (u64)(ctx->ptr)); + if (wa_missing_free) + gen_free_enter(ptr); + + gen_alloc_enter( bytes_alloc); + + return gen_alloc_exit2(ctx, (u64)ptr); + } else { + /* tracepoint is disabled if not exist, avoid compile warning */ + return 0; + } } SEC("tracepoint/kmem/kfree") @@ -341,25 +416,50 @@ int memleak__kfree(void *ctx) } SEC("tracepoint/kmem/kmem_cache_alloc") -int memleak__kmem_cache_alloc(struct trace_event_raw_kmem_alloc *ctx) +int memleak__kmem_cache_alloc(void *ctx) { + const void *ptr; + size_t bytes_alloc; + + if (has_kmem_alloc()) { + struct trace_event_raw_kmem_alloc___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); + } else { + struct trace_event_raw_kmem_cache_alloc___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); + } + if (wa_missing_free) - gen_free_enter(ctx->ptr); + gen_free_enter(ptr); - gen_alloc_enter(ctx->bytes_alloc); + gen_alloc_enter(bytes_alloc); - return gen_alloc_exit2(ctx, (u64)(ctx->ptr)); + return gen_alloc_exit2(ctx, (u64)ptr); } SEC("tracepoint/kmem/kmem_cache_alloc_node") -int memleak__kmem_cache_alloc_node(struct trace_event_raw_kmem_alloc_node *ctx) +int memleak__kmem_cache_alloc_node(void *ctx) { - if (wa_missing_free) - gen_free_enter(ctx->ptr); + const void *ptr; + size_t bytes_alloc; - gen_alloc_enter(ctx->bytes_alloc); + if (has_kmem_alloc_node()) { + struct trace_event_raw_kmem_alloc_node___x *args = ctx; + ptr = BPF_CORE_READ(args, ptr); + bytes_alloc = BPF_CORE_READ(args, bytes_alloc); - return gen_alloc_exit2(ctx, (u64)(ctx->ptr)); + if (wa_missing_free) + gen_free_enter(ptr); + + gen_alloc_enter(bytes_alloc); + + return gen_alloc_exit2(ctx, (u64)ptr); + } else { + /* tracepoint is disabled if not exist, avoid compile warning */ + 
return 0; + } } SEC("tracepoint/kmem/kmem_cache_free") @@ -406,4 +506,4 @@ int memleak__percpu_free_percpu(struct trace_event_raw_percpu_free_percpu *ctx) return gen_free_enter(ctx->ptr); } -char LICENSE[] SEC("license") = "GPL"; +char LICENSE[] SEC("license") = "GPL"; \ No newline at end of file diff --git a/src/16-memleak/memleak.c b/src/16-memleak/memleak.c index b106ebc..fa47b87 100644 --- a/src/16-memleak/memleak.c +++ b/src/16-memleak/memleak.c @@ -22,11 +22,8 @@ #include "memleak.h" #include "memleak.skel.h" -#include "trace_helpers.h" -#ifdef USE_BLAZESYM #include "blazesym.h" -#endif static struct env { int interval; @@ -86,6 +83,10 @@ struct allocation { struct allocation_node* allocations; }; +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + #define __ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe) \ do { \ LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts, \ @@ -132,13 +133,8 @@ static int event_notify(int fd, uint64_t event); static pid_t fork_sync_exec(const char *command, int fd); -#ifdef USE_BLAZESYM static void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym); static void print_stack_frames_by_blazesym(); -#else -static void print_stack_frames_by_ksyms(); -static void print_stack_frames_by_syms_cache(); -#endif static int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd); static int alloc_size_compare(const void *a, const void *b); @@ -146,7 +142,6 @@ static int alloc_size_compare(const void *a, const void *b); static int print_outstanding_allocs(int allocs_fd, int stack_traces_fd); static int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_fd); -static bool has_kernel_node_tracepoints(); static void disable_kernel_node_tracepoints(struct memleak_bpf *skel); static void disable_kernel_percpu_tracepoints(struct memleak_bpf *skel); static void disable_kernel_tracepoints(struct memleak_bpf *skel); @@ -210,13 +205,8 @@ static struct sigaction sig_action = { static int child_exec_event_fd = -1; -#ifdef USE_BLAZESYM static blazesym *symbolizer; static sym_src_cfg src_cfg; -#else -struct syms_cache *syms_cache; -struct ksyms *ksyms; -#endif static void (*print_stack_frames_func)(); static uint64_t *stack; @@ -225,6 +215,14 @@ static struct allocation *allocs; static const char default_object[] = "libc.so.6"; +unsigned long long get_ktime_ns(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; +} + int main(int argc, char *argv[]) { int ret = 0; @@ -304,7 +302,6 @@ int main(int argc, char *argv[]) goto cleanup; } -#ifdef USE_BLAZESYM if (env.pid < 0) { src_cfg.src_type = SRC_T_KERNEL; src_cfg.params.kernel.kallsyms = NULL; @@ -313,7 +310,6 @@ int main(int argc, char *argv[]) src_cfg.src_type = SRC_T_PROCESS; src_cfg.params.process.pid = env.pid; } -#endif // allocate space for storing "allocation" structs if (env.combined_only) @@ -352,8 +348,7 @@ int main(int argc, char *argv[]) // disable kernel tracepoints based on settings or availability if (env.kernel_trace) { - if (!has_kernel_node_tracepoints()) - disable_kernel_node_tracepoints(skel); + disable_kernel_node_tracepoints(skel); if (!env.percpu) disable_kernel_percpu_tracepoints(skel); @@ -400,7 +395,6 @@ int main(int argc, char *argv[]) } } -#ifdef USE_BLAZESYM symbolizer = blazesym_new(); if (!symbolizer) { fprintf(stderr, "Failed to load blazesym\n"); @@ -409,28 +403,6 @@ int main(int argc, char *argv[]) goto cleanup; } print_stack_frames_func = 
print_stack_frames_by_blazesym; -#else - if (env.kernel_trace) { - ksyms = ksyms__load(); - if (!ksyms) { - fprintf(stderr, "Failed to load ksyms\n"); - ret = -ENOMEM; - - goto cleanup; - } - print_stack_frames_func = print_stack_frames_by_ksyms; - } else { - syms_cache = syms_cache__new(0); - if (!syms_cache) { - fprintf(stderr, "Failed to create syms_cache\n"); - ret = -ENOMEM; - - goto cleanup; - } - print_stack_frames_func = print_stack_frames_by_syms_cache; - } -#endif - printf("Tracing outstanding memory allocs... Hit Ctrl-C to end\n"); // main loop @@ -467,14 +439,7 @@ int main(int argc, char *argv[]) } cleanup: -#ifdef USE_BLAZESYM blazesym_free(symbolizer); -#else - if (syms_cache) - syms_cache__free(syms_cache); - if (ksyms) - ksyms__free(ksyms); -#endif memleak_bpf__destroy(skel); free(allocs); @@ -671,7 +636,6 @@ pid_t fork_sync_exec(const char *command, int fd) return pid; } -#if USE_BLAZESYM void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym) { if (!sym) @@ -721,51 +685,6 @@ void print_stack_frames_by_blazesym() blazesym_result_free(result); } -#else -void print_stack_frames_by_ksyms() -{ - for (size_t i = 0; i < env.perf_max_stack_depth; ++i) { - const uint64_t addr = stack[i]; - - if (addr == 0) - break; - - const struct ksym *ksym = ksyms__map_addr(ksyms, addr); - if (ksym) - printf("\t%zu [<%016lx>] %s+0x%lx\n", i, addr, ksym->name, addr - ksym->addr); - else - printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym"); - } -} - -void print_stack_frames_by_syms_cache() -{ - const struct syms *syms = syms_cache__get_syms(syms_cache, env.pid); - if (!syms) { - fprintf(stderr, "Failed to get syms\n"); - return; - } - - for (size_t i = 0; i < env.perf_max_stack_depth; ++i) { - const uint64_t addr = stack[i]; - - if (addr == 0) - break; - - char *dso_name; - uint64_t dso_offset; - const struct sym *sym = syms__map_addr_dso(syms, addr, &dso_name, &dso_offset); - if (sym) { - printf("\t%zu [<%016lx>] %s+0x%lx", i, addr, sym->name, sym->offset); - if (dso_name) - printf(" [%s]", dso_name); - printf("\n"); - } else { - printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym"); - } - } -} -#endif int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd) { @@ -994,12 +913,6 @@ int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_f return 0; } -bool has_kernel_node_tracepoints() -{ - return tracepoint_exists("kmem", "kmalloc_node") && - tracepoint_exists("kmem", "kmem_cache_alloc_node"); -} - void disable_kernel_node_tracepoints(struct memleak_bpf *skel) { bpf_program__set_autoload(skel->progs.memleak__kmalloc_node, false); diff --git a/src/16-memleak/trace_helpers.c b/src/16-memleak/trace_helpers.c index 89c4835..feb2553 100644 --- a/src/16-memleak/trace_helpers.c +++ b/src/16-memleak/trace_helpers.c @@ -20,7 +20,6 @@ #include #include #include "trace_helpers.h" -#include "uprobe_helpers.h" #define min(x, y) ({ \ typeof(x) _min1 = (x); \ diff --git a/src/17-biopattern/README_en.md b/src/17-biopattern/README_en.md index 532786b..9ffae4c 100644 --- a/src/17-biopattern/README_en.md +++ b/src/17-biopattern/README_en.md @@ -125,7 +125,7 @@ int handle__block_rq_complete(void *args) char LICENSE[] SEC("license") = "GPL"; ``` -1. Global variable definitions +Global variable definitions: ```c const volatile bool filter_dev = false; @@ -134,7 +134,7 @@ char LICENSE[] SEC("license") = "GPL"; These two global variables are used for device filtering. 
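Before the two globals are described in detail, a brief aside on how they are set: `const volatile` globals like these are normally configured from user space through the skeleton's `.rodata` section before the object is loaded. The sketch below shows that pattern; the skeleton function names follow the usual `bpftool gen skeleton` convention and the concrete field values are assumptions, so this is not the tutorial's actual `biopattern.c`.

```c
/* Sketch: configuring const volatile globals via the skeleton's .rodata. */
#include <stdio.h>
#include <bpf/libbpf.h>
#include "biopattern.skel.h" /* generated by `bpftool gen skeleton` */

int main(void)
{
	struct biopattern_bpf *skel;
	int err;

	skel = biopattern_bpf__open();
	if (!skel)
		return 1;

	/* read-only globals must be set before load; values here are examples */
	skel->rodata->filter_dev = true;
	skel->rodata->targ_dev = 271581185; /* assumed encoding of the target device */

	err = biopattern_bpf__load(skel);
	if (!err)
		err = biopattern_bpf__attach(skel);
	if (err)
		fprintf(stderr, "failed to load/attach BPF object: %d\n", err);

	/* a real tool would periodically read and print the counters map here */

	biopattern_bpf__destroy(skel);
	return err != 0;
}
```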
`filter_dev` determines whether device filtering is enabled or not, and `targ_dev` is the identifier of the target device we want to track. -2. BPF map definition +BPF map definition: ```c struct { __uint(type, BPF_MAP_TYPE_HASH); @@ -145,7 +145,7 @@ These two global variables are used for device filtering. `filter_dev` determine This part of the code defines a BPF map of type hash table. The key of the map is the identifier of the device, and the value is a `counter` struct, which is used to store the I/O statistics of the device. -3. The tracepoint function +The tracepoint function: ```c SEC("tracepoint/block/block_rq_complete") @@ -223,7 +223,7 @@ To address this issue, the `biopattern` utility introduces a mechanism to dynami Two tracepoint structures are defined here, corresponding to different versions of the kernel. Each structure contains a device identifier `(dev` ), sector number `(sector` ), and number of sectors `(nr_sector` ). -2. **Dynamic detection of trackpoint structures**: +**Dynamic detection of tracepoint structures**: ```c static __always_inline bool has_block_rq_completion() @@ -262,7 +262,7 @@ This is the main loop of the `biopattern` utility, and its workflow is as follow - `print_map`: call `print_map` function to print the statistics in BPF map. - **Exit condition**: if an exit signal is received `(exiting` is `true` ) or if the specified number of runs is reached `(env.times` reaches 0), the loop exits. -2. Print mapping function +Print mapping function: ```c static int print_map(struct bpf_map *counters, struct partitions *partitions) diff --git a/src/18-further-reading/README.md b/src/18-further-reading/README.md index e24f310..323ef68 100644 --- a/src/18-further-reading/README.md +++ b/src/18-further-reading/README.md @@ -1,3 +1,6 @@ # 更多的参考资料 -TODO +可以在这里找到更多关于 eBPF 的信息: + +- +- diff --git a/src/18-further-reading/README_en.md b/src/18-further-reading/README_en.md index d944be2..ff754c6 100644 --- a/src/18-further-reading/README_en.md +++ b/src/18-further-reading/README_en.md @@ -1,3 +1,6 @@ # More Reference Materials -TODO \ No newline at end of file +You may find more about eBPF in these places: + +- A curated list of awesome projects related to eBPF: +- diff --git a/src/19-lsm-connect/README_en.md b/src/19-lsm-connect/README_en.md index adfaefb..b5cd8d3 100644 --- a/src/19-lsm-connect/README_en.md +++ b/src/19-lsm-connect/README_en.md @@ -165,4 +165,4 @@ If you want to learn more about eBPF knowledge and practices, you can visit our ## References + -+ \ No newline at end of file ++ diff --git a/src/20-tc/README_en.md b/src/20-tc/README_en.md index c0d5c2c..4d2925e 100644 --- a/src/20-tc/README_en.md +++ b/src/20-tc/README_en.md @@ -50,6 +50,7 @@ int tc_ingress(struct __sk_buff *ctx) char __license[] SEC("license") = "GPL"; ``` +
In this program, we limit it to capture only IPv4 protocol packets, and then print out the total length and Time-To-Live (TTL) value of the packet using the bpf_printk function.Here is the translated text: " @@ -106,4 +107,4 @@ If you want to learn more about eBPF knowledge and practice, you can visit our t + + -" \ No newline at end of file +" diff --git a/src/21-xdp/README_en.md b/src/21-xdp/README_en.md index 5c1b635..ad7b88c 100644 --- a/src/21-xdp/README_en.md +++ b/src/21-xdp/README_en.md @@ -97,6 +97,8 @@ For those interested in further exploring eBPF, visit our tutorial code reposito ## References +For more information, you can refer to: + + + + diff --git a/src/22-android/README_en.md b/src/22-android/README_en.md index 52f9030..891e91a 100644 --- a/src/22-android/README_en.md +++ b/src/22-android/README_en.md @@ -1,6 +1,7 @@ # Using eBPF Programs on Android -> This article mainly documents the author's exploration process, results, and issues encountered while testing the level of support for CO-RE technology based on the libbpf library on high version Android kernels in the Android Studio Emulator. The test was conducted by building a Debian environment in the Android Shell environment and attempting to build the eunomia-bpf toolchain and run its test cases based on this. +> This article mainly documents the author's exploration process, results, and issues encountered while testing the level of support for CO-RE technology based on the libbpf library on high version Android kernels in the Android Studio Emulator. +> The test was conducted by building a Debian environment in the Android Shell environment and attempting to build the eunomia-bpf toolchain and run its test cases based on this. ## Background